// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96 sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/migrate.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/khugepaged.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>
#include <linux/sched/sysctl.h>

#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Scan pressure balancing between anon and file LRUs
	 */
	unsigned long anon_cost;
	unsigned long file_cost;

#ifdef CONFIG_MEMCG
	/* Swappiness value for proactive reclaim. Always use sc_swappiness()! */
	int *proactive_swappiness;
#endif

	/* Can active folios be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped folios be reclaimed? */
	unsigned int may_unmap:1;

	/* Can folios be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/* Do not allow cache_trim_mode to be turned on as part of reclaim */
	unsigned int no_cache_trim_mode:1;

	/* Has cache_trim_mode failed at least once? */
	unsigned int cache_trim_mode_failed:1;

	/* Proactive reclaim invoked by userspace through memory.reclaim */
	unsigned int proactive:1;

	/*
	 * Cgroup memory below memory.low is protected as long as we
	 * don't threaten to OOM. If any cgroup is reclaimed at
	 * reduced force or passed over entirely due to its memory.low
	 * setting (memcg_low_skipped), and nothing is reclaimed as a
	 * result, then go back for one more cycle that reclaims the protected
	 * memory (memcg_low_reclaim) to avert OOM.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	/* Shared cgroup tree walk failed, rescan the whole tree */
	unsigned int memcg_full_walk:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* There is easily reclaimable cold cache in the current node */
	unsigned int cache_trim_mode:1;

	/* The file folios on the current node are dangerously low */
	unsigned int file_is_tiny:1;

	/* Always discard instead of demoting to lower tier memory */
	unsigned int no_demotion:1;

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate folios for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	/* for recording the reclaimed slab by now */
	struct reclaim_state reclaim_state;
};

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_folio(_folio, _base, _field)			\
	do {								\
		if ((_folio)->lru.prev != _base) {			\
			struct folio *prev;				\
									\
			prev = lru_to_folio(&(_folio->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. MAX_SWAPPINESS. Higher means more swappy.
 */
int vm_swappiness = 60;

#ifdef CONFIG_MEMCG

/* Returns true for reclaim through cgroup limits or cgroup interfaces. */
static bool cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}

/*
 * Returns true for reclaim on the root cgroup. This is true for direct
 * allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
 */
static bool root_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
}

/**
 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_folio_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool writeback_throttling_sane(struct scan_control *sc)
{
	if (!cgroup_reclaim(sc))
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}

static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
{
	if (sc->proactive && sc->proactive_swappiness)
		return *sc->proactive_swappiness;
	return mem_cgroup_swappiness(memcg);
}
#else
static bool cgroup_reclaim(struct scan_control *sc)
{
	return false;
}

static bool root_reclaim(struct scan_control *sc)
{
	return true;
}

static bool writeback_throttling_sane(struct scan_control *sc)
{
	return true;
}

static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
{
	return READ_ONCE(vm_swappiness);
}
#endif

static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the nulling of an already-nulled member */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}

/*
 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
 * scan_control->nr_reclaimed.
 */
static void flush_reclaim_state(struct scan_control *sc)
{
	/*
	 * Currently, reclaim_state->reclaimed includes three types of pages
	 * freed outside of vmscan:
	 * (1) Slab pages.
	 * (2) Clean file pages from pruned inodes (on highmem systems).
	 * (3) XFS freed buffer pages.
	 *
	 * For all of these cases, we cannot universally link the pages to a
	 * single memcg. For example, a memcg-aware shrinker can free one object
	 * charged to the target memcg, causing an entire page to be freed.
	 * If we count the entire page as reclaimed from the memcg, we end up
	 * overestimating the reclaimed amount (potentially under-reclaiming).
	 *
	 * Only count such pages for global reclaim to prevent under-reclaiming
	 * from the target memcg; preventing unnecessary retries during memcg
	 * charging and false positives from proactive reclaim.
	 *
	 * For uncommon cases where the freed pages were actually mostly
	 * charged to the target memcg, we end up underestimating the reclaimed
	 * amount. This should be fine. The freed pages will be uncharged
	 * anyway, even if they are not counted here properly, and we will be
	 * able to make forward progress in charging (which is usually in a
	 * retry loop).
	 *
	 * We can go one step further, and report the uncharged objcg pages in
	 * memcg reclaim, to make reporting more accurate and reduce
	 * underestimation, but it's probably not worth the complexity for now.
	 */
	if (current->reclaim_state && root_reclaim(sc)) {
		sc->nr_reclaimed += current->reclaim_state->reclaimed;
		current->reclaim_state->reclaimed = 0;
	}
}

static bool can_demote(int nid, struct scan_control *sc)
{
	if (!numa_demotion_enabled)
		return false;
	if (sc && sc->no_demotion)
		return false;
	if (next_demotion_node(nid) == NUMA_NO_NODE)
		return false;

	return true;
}

static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
					  int nid,
					  struct scan_control *sc)
{
	if (memcg == NULL) {
		/*
		 * For non-memcg reclaim, is there
		 * space in any swap device?
		 */
		if (get_nr_swap_pages() > 0)
			return true;
	} else {
		/* Is the memcg below its swap limit? */
		if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
			return true;
	}

	/*
	 * The page cannot be swapped.
	 *
	 * Can it be reclaimed from this node via demotion?
	 */
	return can_demote(nid, sc);
}

/*
 * This misses isolated folios which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated folios will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
	/*
	 * If there are no reclaimable file-backed or anonymous pages,
	 * ensure zones with sufficient free pages are not skipped.
	 * This prevents zones like DMA32 from being ignored in reclaim
	 * scenarios where they can still help alleviate memory pressure.
	 */
	if (nr == 0)
		nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
	return nr;
}

/**
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
 */
static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
				     int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}

static unsigned long drop_slab_node(int nid)
{
	unsigned long freed = 0;
	struct mem_cgroup *memcg = NULL;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

	return freed;
}

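/*
 * Illustrative note on the termination condition below: pass N keeps the
 * loop going only while (freed >> shift++) > 1, i.e. roughly only if that
 * pass freed at least 2^N objects, so each pass must free about twice as
 * much as the previous one. For example, passes freeing 100000, 5000 and
 * then 7 objects stop after the third pass (7 >> 2 == 1).
 */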
void drop_slab(void)
{
	int nid;
	int shift = 0;
	unsigned long freed;

	do {
		freed = 0;
		for_each_online_node(nid) {
			if (fatal_signal_pending(current))
				return;

			freed += drop_slab_node(nid);
		}
	} while ((freed >> shift++) > 1);
}

static int reclaimer_offset(void)
{
	BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
			PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
			PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
			PGSCAN_DIRECT - PGSCAN_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
			PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);

	if (current_is_kswapd())
		return 0;
	if (current_is_khugepaged())
		return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
	return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
}

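/*
 * Illustration of the reference-count check below: an order-0 page-cache
 * folio that has been isolated by the caller and still carries filesystem
 * private data is expected to hold 3 references (isolating caller + page
 * cache + private data), and 3 - 1 == 1 + 1 holds, so it is considered
 * freeable. Any extra reference makes the test fail.
 */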
static inline int is_page_cache_freeable(struct folio *folio)
{
	/*
	 * A freeable page cache folio is referenced only by the caller
	 * that isolated the folio, the page cache and optional filesystem
	 * private data at folio->private.
	 */
	return folio_ref_count(folio) - folio_test_private(folio) ==
		1 + folio_nr_pages(folio);
}

/*
 * We detected a synchronous write error writing a folio out. Probably
 * -ENOSPC. We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up. But we have a ref on the folio and once
 * that folio is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping folio_lock() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct folio *folio, int error)
{
	folio_lock(folio);
	if (folio_mapping(folio) == mapping)
		mapping_set_error(mapping, error);
	folio_unlock(folio);
}

static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
	int reclaimable = 0, write_pending = 0;
	int i;

	/*
	 * If kswapd is disabled, reschedule if necessary but do not
	 * throttle as the system is likely near OOM.
	 */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	/*
	 * If there are a lot of dirty/writeback folios then do not
	 * throttle as throttling will occur when the folios cycle
	 * towards the end of the LRU if still under writeback.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!managed_zone(zone))
			continue;

		reclaimable += zone_reclaimable_pages(zone);
		write_pending += zone_page_state_snapshot(zone,
						  NR_ZONE_WRITE_PENDING);
	}
	if (2 * write_pending <= reclaimable)
		return true;

	return false;
}

void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
	wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
	long timeout, ret;
	DEFINE_WAIT(wait);

	/*
	 * Do not throttle user workers, kthreads other than kswapd or
	 * workqueues. They may be required for reclaim to make
	 * forward progress (e.g. journalling workqueues or kthreads).
	 */
	if (!current_is_kswapd() &&
	    current->flags & (PF_USER_WORKER|PF_KTHREAD)) {
		cond_resched();
		return;
	}

	/*
	 * These figures are pulled out of thin air.
	 * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many
	 * parallel reclaimers which is a short-lived event so the timeout is
	 * short. Failing to make progress or waiting on writeback are
	 * potentially long-lived events so use a longer timeout. This is shaky
	 * logic as a failure to make progress could be due to anything from
	 * writeback to a slow device to excessive referenced folios at the tail
	 * of the inactive LRU.
	 */
	switch(reason) {
	case VMSCAN_THROTTLE_WRITEBACK:
		timeout = HZ/10;

		if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
			WRITE_ONCE(pgdat->nr_reclaim_start,
				node_page_state(pgdat, NR_THROTTLED_WRITTEN));
		}

		break;
	case VMSCAN_THROTTLE_CONGESTED:
		fallthrough;
	case VMSCAN_THROTTLE_NOPROGRESS:
		if (skip_throttle_noprogress(pgdat)) {
			cond_resched();
			return;
		}

		timeout = 1;

		break;
	case VMSCAN_THROTTLE_ISOLATED:
		timeout = HZ/50;
		break;
	default:
		WARN_ON_ONCE(1);
		timeout = HZ;
		break;
	}

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	if (reason == VMSCAN_THROTTLE_WRITEBACK)
		atomic_dec(&pgdat->nr_writeback_throttled);

	trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
				jiffies_to_usecs(timeout - ret),
				reason);
}

/*
 * Account for folios written if tasks are throttled waiting on dirty
 * folios to clean. If enough folios have been cleaned since throttling
 * started then wakeup the throttled tasks.
 */
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
							int nr_throttled)
{
	unsigned long nr_written;

	node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);

	/*
	 * This is an inaccurate read as the per-cpu deltas may not
	 * be synchronised. However, given that the system is
	 * writeback throttled, it is not worth taking the penalty
	 * of getting an accurate count. At worst, the throttle
	 * timeout guarantees forward progress.
	 */
	nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
		READ_ONCE(pgdat->nr_reclaim_start);

	if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
		wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write folio out, folio is locked */
	PAGE_KEEP,
	/* move folio to the active list, folio is locked */
	PAGE_ACTIVATE,
	/* folio has been sent to the disk successfully, folio is unlocked */
	PAGE_SUCCESS,
	/* folio is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_folio_list() for each dirty folio.
 * Calls ->writepage().
 */
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
			 struct swap_iocb **plug, struct list_head *folio_list)
{
	/*
	 * If the folio is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity. But note that there may be
	 * stalls if we need to run get_block(). We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this folio's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the folio is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs. Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(folio))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned folios can have
		 * folio->mapping == NULL while being dirty with clean buffers.
		 */
		if (folio_test_private(folio)) {
			if (try_to_free_buffers(folio)) {
				folio_clear_dirty(folio);
				pr_info("%s: orphaned folio\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;

	if (folio_clear_dirty_for_io(folio)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
			.swap_plug = plug,
		};

		/*
		 * The large shmem folio can be split if CONFIG_THP_SWAP is
		 * not enabled or contiguous swap entries cannot be allocated.
		 */
		if (shmem_mapping(mapping) && folio_test_large(folio))
			wbc.list = folio_list;

		folio_set_reclaim(folio);
		res = mapping->a_ops->writepage(&folio->page, &wbc);
		if (res < 0)
			handle_write_error(mapping, folio, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			folio_clear_reclaim(folio);
			return PAGE_ACTIVATE;
		}

		if (!folio_test_writeback(folio)) {
			/* synchronous write or broken a_ops? */
			folio_clear_reclaim(folio);
		}
		trace_mm_vmscan_write_folio(folio);
		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the folio is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct folio *folio,
			    bool reclaimed, struct mem_cgroup *target_memcg)
{
	int refcount;
	void *shadow = NULL;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(mapping != folio_mapping(folio));

	if (!folio_test_swapcache(folio))
		spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	/*
	 * The non racy check for a busy folio.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the folio, it may be possible that they dirty it then
	 * drop the reference. So if the dirty flag is tested before the
	 * refcount here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!folio_test_dirty(folio)    [good]
	 * folio_set_dirty(folio);
	 * folio_put(folio);
	 *				!refcount(folio)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
	 * load is not satisfied before that of folio->_refcount.
	 *
	 * Note that if the dirty flag is always set via folio_mark_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	refcount = 1 + folio_nr_pages(folio);
	if (!folio_ref_freeze(folio, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */
	if (unlikely(folio_test_dirty(folio))) {
		folio_ref_unfreeze(folio, refcount);
		goto cannot_free;
	}

	if (folio_test_swapcache(folio)) {
		swp_entry_t swap = folio->swap;

		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(folio, target_memcg);
		__delete_from_swap_cache(folio, swap, shadow);
		mem_cgroup_swapout(folio, swap);
		xa_unlock_irq(&mapping->i_pages);
		put_swap_folio(folio, swap);
	} else {
		void (*free_folio)(struct folio *);

		free_folio = mapping->a_ops->free_folio;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting. This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost. Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache folios found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && folio_is_file_lru(folio) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(folio, target_memcg);
		__filemap_remove_folio(folio, shadow);
		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);

		if (free_folio)
			free_folio(folio);
	}

	return 1;

cannot_free:
	xa_unlock_irq(&mapping->i_pages);
	if (!folio_test_swapcache(folio))
		spin_unlock(&mapping->host->i_lock);
	return 0;
}

/**
 * remove_mapping() - Attempt to remove a folio from its mapping.
 * @mapping: The address space.
 * @folio: The folio to remove.
 *
 * If the folio is dirty, under writeback or if someone else has a ref
 * on it, removal will fail.
 * Return: The number of pages removed from the mapping. 0 if the folio
 * could not be removed.
 * Context: The caller should have a single refcount on the folio and
 * hold its lock.
 */
long remove_mapping(struct address_space *mapping, struct folio *folio)
{
	if (__remove_mapping(mapping, folio, false, NULL)) {
		/*
		 * Unfreezing the refcount with 1 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		folio_ref_unfreeze(folio, 1);
		return folio_nr_pages(folio);
	}
	return 0;
}

/**
 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
 * @folio: Folio to be returned to an LRU list.
 *
 * Add previously isolated @folio to appropriate LRU list.
 * The folio may still be unevictable for other reasons.
 *
 * Context: lru_lock must not be held, interrupts must be enabled.
 */
void folio_putback_lru(struct folio *folio)
{
	folio_add_lru(folio);
	folio_put(folio);	/* drop ref from isolate */
}

enum folio_references {
	FOLIOREF_RECLAIM,
	FOLIOREF_RECLAIM_CLEAN,
	FOLIOREF_KEEP,
	FOLIOREF_ACTIVATE,
};

static enum folio_references folio_check_references(struct folio *folio,
						    struct scan_control *sc)
{
	int referenced_ptes, referenced_folio;
	unsigned long vm_flags;

	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
					   &vm_flags);
	referenced_folio = folio_test_clear_referenced(folio);

	/*
	 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
	 * Let the folio, now marked Mlocked, be moved to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return FOLIOREF_ACTIVATE;

	/*
	 * There are two cases to consider.
	 * 1) Rmap lock contention: rotate.
	 * 2) Skip the non-shared swapbacked folio mapped solely by
	 *    the exiting or OOM-reaped process.
	 */
	if (referenced_ptes == -1)
		return FOLIOREF_KEEP;

	if (referenced_ptes) {
		/*
		 * All mapped folios start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file/anon folio is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list. Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated folios as well
		 * so that recently deactivated but used folios are
		 * quickly recovered.
		 */
		folio_set_referenced(folio);

		if (referenced_folio || referenced_ptes > 1)
			return FOLIOREF_ACTIVATE;

		/*
		 * Activate file-backed executable folios after first usage.
		 */
		if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
			return FOLIOREF_ACTIVATE;

		return FOLIOREF_KEEP;
	}

	/* Reclaim if clean, defer dirty folios to writeback */
	if (referenced_folio && folio_is_file_lru(folio))
		return FOLIOREF_RECLAIM_CLEAN;

	return FOLIOREF_RECLAIM;
}

/* Check if a folio is dirty or under writeback */
static void folio_check_dirty_writeback(struct folio *folio,
					bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous folios are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them.
	 * MADV_FREE anonymous folios are put into inactive file list too.
	 * They could be mistakenly treated as file lru. So further anon
	 * test is needed.
	 */
	if (!folio_is_file_lru(folio) ||
	    (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the folio flags are accurate */
	*dirty = folio_test_dirty(folio);
	*writeback = folio_test_writeback(folio);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!folio_test_private(folio))
		return;

	mapping = folio_mapping(folio);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}

struct folio *alloc_migrate_folio(struct folio *src, unsigned long private)
{
	struct folio *dst;
	nodemask_t *allowed_mask;
	struct migration_target_control *mtc;

	mtc = (struct migration_target_control *)private;

	allowed_mask = mtc->nmask;
	/*
	 * Make sure we allocate from the target node first, also trying to
	 * demote or reclaim pages from the target node via kswapd if we are
	 * low on free memory on the target node. If we don't do this and if
	 * we have free memory on the slower(lower) memtier, we would start
	 * allocating pages from slower(lower) memory tiers without even forcing
	 * a demotion of cold pages from the target memtier. This can result
	 * in the kernel placing hot pages in slower(lower) memory tiers.
	 */
	mtc->nmask = NULL;
	mtc->gfp_mask |= __GFP_THISNODE;
	dst = alloc_migration_target(src, (unsigned long)mtc);
	if (dst)
		return dst;

	mtc->gfp_mask &= ~__GFP_THISNODE;
	mtc->nmask = allowed_mask;

	return alloc_migration_target(src, (unsigned long)mtc);
}

/*
 * Take folios on @demote_folios and attempt to demote them to another node.
 * Folios which are not demoted are left on @demote_folios.
 */
static unsigned int demote_folio_list(struct list_head *demote_folios,
				      struct pglist_data *pgdat)
{
	int target_nid = next_demotion_node(pgdat->node_id);
	unsigned int nr_succeeded;
	nodemask_t allowed_mask;

	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
			__GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,
		.nmask = &allowed_mask,
		.reason = MR_DEMOTION,
	};

	if (list_empty(demote_folios))
		return 0;

	if (target_nid == NUMA_NO_NODE)
		return 0;

	node_get_allowed_targets(pgdat, &allowed_mask);

	/* Demotion ignores all cpuset and mempolicy settings */
	migrate_pages(demote_folios, alloc_migrate_folio, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
		      &nr_succeeded);

	return nr_succeeded;
}

static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_FS)
		return true;
	if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
		return false;
	/*
	 * We can "enter_fs" for swap-cache with only __GFP_IO
	 * providing this isn't SWP_FS_OPS.
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
}

/*
 * shrink_folio_list() returns the number of reclaimed pages
 */
static unsigned int shrink_folio_list(struct list_head *folio_list,
		struct pglist_data *pgdat, struct scan_control *sc,
		struct reclaim_stat *stat, bool ignore_references)
{
	struct folio_batch free_folios;
	LIST_HEAD(ret_folios);
	LIST_HEAD(demote_folios);
	unsigned int nr_reclaimed = 0;
	unsigned int pgactivate = 0;
	bool do_demote_pass;
	struct swap_iocb *plug = NULL;

	folio_batch_init(&free_folios);
	memset(stat, 0, sizeof(*stat));
	cond_resched();
	do_demote_pass = can_demote(pgdat->node_id, sc);

retry:
	while (!list_empty(folio_list)) {
		struct address_space *mapping;
		struct folio *folio;
		enum folio_references references = FOLIOREF_RECLAIM;
		bool dirty, writeback;
		unsigned int nr_pages;

		cond_resched();

		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);

		if (!folio_trylock(folio))
			goto keep;

		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);

		nr_pages = folio_nr_pages(folio);

		/* Account the number of base pages */
		sc->nr_scanned += nr_pages;

		if (unlikely(!folio_evictable(folio)))
			goto activate_locked;

		if (!sc->may_unmap && folio_mapped(folio))
			goto keep_locked;

		/* folio_update_gen() tried to promote this page? */
		if (lru_gen_enabled() && !ignore_references &&
		    folio_mapped(folio) && folio_test_referenced(folio))
			goto keep_locked;

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested. kswapd will stall and start writing
		 * folios if the tail of the LRU is all dirty unqueued folios.
		 */
		folio_check_dirty_writeback(folio, &dirty, &writeback);
		if (dirty || writeback)
			stat->nr_dirty += nr_pages;

		if (dirty && !writeback)
			stat->nr_unqueued_dirty += nr_pages;

		/*
		 * Treat this folio as congested if folios are cycling
		 * through the LRU so quickly that the folios marked
		 * for immediate reclaim are making it to the end of
		 * the LRU a second time.
		 */
		if (writeback && folio_test_reclaim(folio))
			stat->nr_congested += nr_pages;

		/*
		 * If a folio at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number
		 *    of folios under writeback and this folio has both
		 *    the writeback and reclaim flags set, then it
		 *    indicates that folios are being queued for I/O but
		 *    are being recycled through the LRU before the I/O
		 *    can complete. Waiting on the folio itself risks an
		 *    indefinite stall if it is impossible to writeback
		 *    the folio due to I/O error or disconnected storage
		 *    so instead note that the LRU is being scanned too
		 *    quickly and the caller can stall after the folio
		 *    list has been processed.
		 *
		 * 2) Global or new memcg reclaim encounters a folio that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the folio for immediate
		 *    reclaim and continue scanning.
		 *
		 *    Require may_enter_fs() because we would wait on fs, which
		 *    may not have submitted I/O yet. And the loop driver might
		 *    enter reclaim and deadlock if it waits on a folio for
		 *    which it needs to do the write (loop masks off
		 *    __GFP_IO|__GFP_FS for this reason); but more thought
		 *    would probably show more reasons.
		 *
		 * 3) Legacy memcg encounters a folio that already has the
		 *    reclaim flag set. memcg does not have any dirty folio
		 *    throttling so we could easily OOM just because too many
		 *    folios are in writeback and there is nothing else to
		 *    reclaim. Wait for the writeback to complete.
		 *
		 * In cases 1) and 2) we activate the folios to get them out of
		 * the way while we continue scanning for clean folios on the
		 * inactive list and refilling from the active list. The
		 * observation here is that waiting for disk writes is more
		 * expensive than potentially causing reloads down the line.
		 * Since they're marked for immediate reclaim, they won't put
		 * memory pressure on the cache working set any longer than it
		 * takes to write them to disk.
		 */
		if (folio_test_writeback(folio)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    folio_test_reclaim(folio) &&
			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
				stat->nr_immediate += nr_pages;
				goto activate_locked;

			/* Case 2 above */
			} else if (writeback_throttling_sane(sc) ||
			    !folio_test_reclaim(folio) ||
			    !may_enter_fs(folio, sc->gfp_mask)) {
				/*
				 * This is slightly racy -
				 * folio_end_writeback() might have
				 * just cleared the reclaim flag, then
				 * setting the reclaim flag here ends up
				 * interpreted as the readahead flag - but
				 * that does not matter enough to care.
				 * What we do want is for this folio to
				 * have the reclaim flag set next time
				 * memcg reclaim reaches the tests above,
				 * so it will then wait for writeback to
				 * avoid OOM; and it's also appropriate
				 * in global reclaim.
				 */
				folio_set_reclaim(folio);
				stat->nr_writeback += nr_pages;
				goto activate_locked;

			/* Case 3 above */
			} else {
				folio_unlock(folio);
				folio_wait_writeback(folio);
				/* then go back and try same folio again */
				list_add_tail(&folio->lru, folio_list);
				continue;
			}
		}

		if (!ignore_references)
			references = folio_check_references(folio, sc);

		switch (references) {
		case FOLIOREF_ACTIVATE:
			goto activate_locked;
		case FOLIOREF_KEEP:
			stat->nr_ref_keep += nr_pages;
			goto keep_locked;
		case FOLIOREF_RECLAIM:
		case FOLIOREF_RECLAIM_CLEAN:
			; /* try to reclaim the folio below */
		}

		/*
		 * Before reclaiming the folio, try to relocate
		 * its contents to another node.
		 */
		if (do_demote_pass &&
		    (thp_migration_supported() || !folio_test_large(folio))) {
			list_add(&folio->lru, &demote_folios);
			folio_unlock(folio);
			continue;
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 * Lazyfree folio could be freed directly
		 */
		if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
			if (!folio_test_swapcache(folio)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (folio_maybe_dma_pinned(folio))
					goto keep_locked;
				if (folio_test_large(folio)) {
					/* cannot split folio, skip it */
					if (!can_split_folio(folio, 1, NULL))
						goto activate_locked;
					/*
					 * Split partially mapped folios right away.
					 * We can free the unmapped pages without IO.
					 */
					if (data_race(!list_empty(&folio->_deferred_list) &&
					    folio_test_partially_mapped(folio)) &&
					    split_folio_to_list(folio, folio_list))
						goto activate_locked;
				}
				if (!add_to_swap(folio)) {
					int __maybe_unused order = folio_order(folio);

					if (!folio_test_large(folio))
						goto activate_locked_split;
					/* Fallback to swap normal pages */
					if (split_folio_to_list(folio, folio_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					if (nr_pages >= HPAGE_PMD_NR) {
						count_memcg_folio_events(folio,
							THP_SWPOUT_FALLBACK, 1);
						count_vm_event(THP_SWPOUT_FALLBACK);
					}
#endif
					count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
					if (!add_to_swap(folio))
						goto activate_locked_split;
				}
			}
		}

		/*
		 * If the folio was split above, the tail pages will make
		 * their own pass through this function and be accounted
		 * then.
		 */
		if ((nr_pages > 1) && !folio_test_large(folio)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}

		/*
		 * The folio is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (folio_mapped(folio)) {
			enum ttu_flags flags = TTU_BATCH_FLUSH;
			bool was_swapbacked = folio_test_swapbacked(folio);

			if (folio_test_pmd_mappable(folio))
				flags |= TTU_SPLIT_HUGE_PMD;
			/*
			 * Without TTU_SYNC, try_to_unmap will only begin to
			 * hold PTL from the first present PTE within a large
			 * folio. Some initial PTEs might be skipped due to
			 * races with parallel PTE writes in which PTEs can be
			 * cleared temporarily before being written new present
			 * values. This can leave a large folio still mapped
			 * while some of its subpages have already been
			 * unmapped after try_to_unmap; TTU_SYNC helps
			 * try_to_unmap acquire PTL from the first PTE,
			 * eliminating the influence of temporary PTE values.
			 */
			if (folio_test_large(folio))
				flags |= TTU_SYNC;

			try_to_unmap(folio, flags);
			if (folio_mapped(folio)) {
				stat->nr_unmap_fail += nr_pages;
				if (!was_swapbacked &&
				    folio_test_swapbacked(folio))
					stat->nr_lazyfree_fail += nr_pages;
				goto activate_locked;
			}
		}

		/*
		 * Folio is unmapped now so it cannot be newly pinned anymore.
		 * No point in trying to reclaim folio if it is pinned.
		 * Furthermore we don't want to reclaim underlying fs metadata
		 * if the folio is pinned and thus potentially modified by the
		 * pinning process as that may upset the filesystem.
		 */
		if (folio_maybe_dma_pinned(folio))
			goto activate_locked;

		mapping = folio_mapping(folio);
		if (folio_test_dirty(folio)) {
			/*
			 * Only kswapd can writeback filesystem folios
			 * to avoid risk of stack overflow. But avoid
			 * injecting inefficient single-folio I/O into
			 * flusher writeback as much as possible: only
			 * write folios when we've encountered many
			 * dirty folios, and when we've already scanned
			 * the rest of the LRU for clean folios and see
			 * the same dirty folios again (with the reclaim
			 * flag set).
			 */
			if (folio_is_file_lru(folio) &&
			    (!current_is_kswapd() ||
			     !folio_test_reclaim(folio) ||
			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to folio_deactivate()
				 * except we already have the folio isolated
				 * and know it's dirty
				 */
				node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
						nr_pages);
				folio_set_reclaim(folio);

				goto activate_locked;
			}

			if (references == FOLIOREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs(folio, sc->gfp_mask))
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Folio is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after I/O
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(folio, mapping, &plug, folio_list)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				/*
				 * If a shmem folio is split during writeback
				 * to swap, the tail pages will make their own
				 * pass through this function and be accounted
				 * then.
				 */
				if (nr_pages > 1 && !folio_test_large(folio)) {
					sc->nr_scanned -= (nr_pages - 1);
					nr_pages = 1;
				}
				goto activate_locked;
			case PAGE_SUCCESS:
				if (nr_pages > 1 && !folio_test_large(folio)) {
					sc->nr_scanned -= (nr_pages - 1);
					nr_pages = 1;
				}
				stat->nr_pageout += nr_pages;

				if (folio_test_writeback(folio))
					goto keep;
				if (folio_test_dirty(folio))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk. Go
				 * ahead and try to reclaim the folio.
				 */
				if (!folio_trylock(folio))
					goto keep;
				if (folio_test_dirty(folio) ||
				    folio_test_writeback(folio))
					goto keep_locked;
				mapping = folio_mapping(folio);
				fallthrough;
			case PAGE_CLEAN:
				; /* try to free the folio below */
			}
		}

		/*
		 * If the folio has buffers, try to free the buffer
		 * mappings associated with this folio. If we succeed
		 * we try to free the folio as well.
		 *
		 * We do this even if the folio is dirty.
		 * filemap_release_folio() does not perform I/O, but it
		 * is possible for a folio to have the dirty flag set,
		 * but it is actually clean (all its buffers are clean).
		 * This happens if the buffers were written out directly,
		 * with submit_bh(). ext3 will do this, as well as
		 * the blockdev mapping. filemap_release_folio() will
		 * discover that cleanness and will drop the buffers
		 * and mark the folio clean - it can be freed.
		 *
		 * Rarely, folios can have buffers and no ->mapping.
		 * These are the folios which were not successfully
		 * invalidated in truncate_cleanup_folio(). We try to
		 * drop those buffers here and if that worked, and the
		 * folio is no longer mapped into process address space
		 * (refcount == 1) it can be freed. Otherwise, leave
		 * the folio on the LRU so it is swappable.
		 */
		if (folio_needs_release(folio)) {
			if (!filemap_release_folio(folio, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && folio_ref_count(folio) == 1) {
				folio_unlock(folio);
				if (folio_put_testzero(folio))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this folio shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed += nr_pages;
					continue;
				}
			}
		}

		if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
			/* follow __remove_mapping for reference */
			if (!folio_ref_freeze(folio, 1))
				goto keep_locked;
			/*
			 * The folio has only one reference left, which is
			 * from the isolation. After the caller puts the
			 * folio back on the lru and drops the reference, the
			 * folio will be freed anyway. It doesn't matter
			 * which lru it goes on. So we don't bother checking
			 * the dirty flag here.
			 */
			count_vm_events(PGLAZYFREED, nr_pages);
			count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
		} else if (!mapping || !__remove_mapping(mapping, folio, true,
							 sc->target_mem_cgroup))
			goto keep_locked;

		folio_unlock(folio);
free_it:
		/*
		 * Folio may get swapped out as a whole, need to account
		 * all pages in it.
		 */
		nr_reclaimed += nr_pages;

		folio_unqueue_deferred_split(folio);
		if (folio_batch_add(&free_folios, folio) == 0) {
			mem_cgroup_uncharge_folios(&free_folios);
			try_to_unmap_flush();
			free_unref_folios(&free_folios);
		}
		continue;

activate_locked_split:
		/*
		 * The tail pages that failed to be added to the swap cache
		 * reach here. Fixup nr_scanned and nr_pages.
		 */
		if (nr_pages > 1) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}
activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (folio_test_swapcache(folio) &&
		    (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
			folio_free_swap(folio);
		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
		if (!folio_test_mlocked(folio)) {
			int type = folio_is_file_lru(folio);
			folio_set_active(folio);
			stat->nr_activate[type] += nr_pages;
			count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
		}
keep_locked:
		folio_unlock(folio);
keep:
		list_add(&folio->lru, &ret_folios);
		VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
				folio_test_unevictable(folio), folio);
	}
	/* 'folio_list' is always empty here */

	/* Migrate folios selected for demotion */
	stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
	nr_reclaimed += stat->nr_demoted;
	/* Folios that could not be demoted are still in @demote_folios */
	if (!list_empty(&demote_folios)) {
		/* Folios which weren't demoted go back on @folio_list */
		list_splice_init(&demote_folios, folio_list);

		/*
		 * goto retry to reclaim the undemoted folios in folio_list if
		 * desired.
		 *
		 * Reclaiming directly from top tier nodes is not often desired
		 * due to it breaking the LRU ordering: in general memory
		 * should be reclaimed from lower tier nodes and demoted from
		 * top tier nodes.
		 *
		 * However, disabling reclaim from top tier nodes entirely
		 * would cause ooms in edge scenarios where lower tier memory
		 * is unreclaimable for whatever reason, eg memory being
		 * mlocked or too hot to reclaim. We can disable reclaim
		 * from top tier nodes in proactive reclaim though as that is
		 * not real memory pressure.
		 */
		if (!sc->proactive) {
			do_demote_pass = false;
			goto retry;
		}
	}

	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];

	mem_cgroup_uncharge_folios(&free_folios);
	try_to_unmap_flush();
	free_unref_folios(&free_folios);

	list_splice(&ret_folios, folio_list);
	count_vm_events(PGACTIVATE, pgactivate);

	if (plug)
		swap_write_unplug(plug);
	return nr_reclaimed;
}

unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *folio_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
	};
	struct reclaim_stat stat;
	unsigned int nr_reclaimed;
	struct folio *folio, *next;
	LIST_HEAD(clean_folios);
	unsigned int noreclaim_flag;

	list_for_each_entry_safe(folio, next, folio_list, lru) {
		if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
		    !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
		    !folio_test_unevictable(folio)) {
			folio_clear_active(folio);
			list_move(&folio->lru, &clean_folios);
		}
	}

	/*
	 * We should be safe here since we are only dealing with file pages and
	 * we are not kswapd and therefore cannot write dirty file pages. But
	 * call memalloc_noreclaim_save() anyway, just in case these conditions
	 * change in the future.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
					 &stat, true);
	memalloc_noreclaim_restore(noreclaim_flag);

	list_splice(&clean_folios, folio_list);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)nr_reclaimed);
	/*
	 * Since lazyfree pages are isolated from file LRU from the beginning,
	 * they will rotate back to anonymous LRU in the end if it failed to
	 * discard so isolated count will be mismatched.
	 * Compensate the isolated count for both LRU lists.
	 */
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
			    stat.nr_lazyfree_fail);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)stat.nr_lazyfree_fail);
	return nr_reclaimed;
}

/*
 * Update LRU sizes after isolating pages. The LRU size updates must
 * be complete before mem_cgroup_update_lru_size due to a sanity check.
 */
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
			enum lru_list lru, unsigned long *nr_zone_taken)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		if (!nr_zone_taken[zid])
			continue;

		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
	}

}

/*
 * Isolate folios from the lruvec, filling @dst with up to nr_to_scan folios.
 *
 * lruvec->lru_lock is heavily contended. Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Lru_lock must be held before calling this function.
 *
 * @nr_to_scan:	The number of eligible pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
	unsigned long skipped = 0;
	unsigned long scan, total_scan, nr_pages;
	LIST_HEAD(folios_skipped);

	total_scan = 0;
	scan = 0;
	while (scan < nr_to_scan && !list_empty(src)) {
		struct list_head *move_to = src;
		struct folio *folio;

		folio = lru_to_folio(src);
		prefetchw_prev_lru_folio(folio, src, flags);

		nr_pages = folio_nr_pages(folio);
		total_scan += nr_pages;

		if (folio_zonenum(folio) > sc->reclaim_idx) {
			nr_skipped[folio_zonenum(folio)] += nr_pages;
			move_to = &folios_skipped;
			goto move;
		}

		/*
		 * Do not count skipped folios because that makes the function
		 * return with no isolated folios if the LRU mostly contains
		 * ineligible folios. This causes the VM to not reclaim any
		 * folios, triggering a premature OOM.
		 * Account all pages in a folio.
		 */
		scan += nr_pages;

		if (!folio_test_lru(folio))
			goto move;
		if (!sc->may_unmap && folio_mapped(folio))
			goto move;

		/*
		 * Be careful not to clear the lru flag until after we're
		 * sure the folio is not being freed elsewhere -- the
		 * folio release code relies on it.
		 */
		if (unlikely(!folio_try_get(folio)))
			goto move;

		if (!folio_test_clear_lru(folio)) {
			/* Another thread is already isolating this folio */
			folio_put(folio);
			goto move;
		}

		nr_taken += nr_pages;
		nr_zone_taken[folio_zonenum(folio)] += nr_pages;
		move_to = dst;
move:
		list_move(&folio->lru, move_to);
	}

	/*
	 * Splice any skipped folios to the start of the LRU list. Note that
	 * this disrupts the LRU order when reclaiming for lower zones but
	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
	 * scanning would soon rescan the same folios to skip and waste lots
	 * of cpu cycles.
	 */
	if (!list_empty(&folios_skipped)) {
		int zid;

		list_splice(&folios_skipped, src);
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			if (!nr_skipped[zid])
				continue;

			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
			skipped += nr_skipped[zid];
		}
	}
	*nr_scanned = total_scan;
	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
				    total_scan, skipped, nr_taken, lru);
	update_lru_sizes(lruvec, lru, nr_zone_taken);
	return nr_taken;
}

/**
 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
 * @folio: Folio to isolate from its LRU list.
 *
 * Isolate a @folio from an LRU list and adjust the vmstat statistic
 * corresponding to whatever LRU list the folio was on.
 *
 * The folio will have its LRU flag cleared. If it was found on the
 * active list, it will have the Active flag set. If it was found on the
 * unevictable list, it will have the Unevictable flag set. These flags
 * may need to be cleared by the caller before letting the page go.
 *
 * Context:
 *
 * (1) Must be called with an elevated refcount on the folio. This is a
 *     fundamental difference from isolate_lru_folios() (which is called
 *     without a stable reference).
 * (2) The lru_lock must not be held.
 * (3) Interrupts must be enabled.
 *
 * Return: true if the folio was removed from an LRU list.
 * false if the folio was not on an LRU list.
 */
bool folio_isolate_lru(struct folio *folio)
{
	bool ret = false;

	VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);

	if (folio_test_clear_lru(folio)) {
		struct lruvec *lruvec;

		folio_get(folio);
		lruvec = folio_lruvec_lock_irq(folio);
		lruvec_del_folio(lruvec, folio);
		unlock_page_lruvec_irq(lruvec);
		ret = true;
	}

	return ret;
}

/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there is a massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
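/*
 * Illustrative numbers for the check below: with 80000 inactive file pages
 * on the node, a direct reclaimer whose gfp_mask includes both __GFP_IO and
 * __GFP_FS is considered "too many" once more than 10000 (80000 >> 3) file
 * pages are isolated, while a GFP_NOFS reclaimer is only throttled once the
 * isolated count exceeds the full 80000.
 */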
static bool too_many_isolated(struct pglist_data *pgdat, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;
	bool too_many;

	if (current_is_kswapd())
		return false;

	if (!writeback_throttling_sane(sc))
		return false;

	if (file) {
		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
	} else {
		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if (gfp_has_io_fs(sc->gfp_mask))
		inactive >>= 3;

	too_many = isolated > inactive;

	/* Wake up tasks throttled due to too_many_isolated. */
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}

/*
 * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
 *
 * Returns the number of pages moved to the given lruvec.
 */
static unsigned int move_folios_to_lru(struct lruvec *lruvec,
		struct list_head *list)
{
	int nr_pages, nr_moved = 0;
	struct folio_batch free_folios;

	folio_batch_init(&free_folios);
	while (!list_empty(list)) {
		struct folio *folio = lru_to_folio(list);

		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
		list_del(&folio->lru);
		if (unlikely(!folio_evictable(folio))) {
			spin_unlock_irq(&lruvec->lru_lock);
			folio_putback_lru(folio);
			spin_lock_irq(&lruvec->lru_lock);
			continue;
		}

		/*
		 * The folio_set_lru needs to be kept here for list integrity.
1860 * Otherwise: 1861 * #0 move_folios_to_lru #1 release_pages 1862 * if (!folio_put_testzero()) 1863 * if (folio_put_testzero()) 1864 * !lru //skip lru_lock 1865 * folio_set_lru() 1866 * list_add(&folio->lru,) 1867 * list_add(&folio->lru,) 1868 */ 1869 folio_set_lru(folio); 1870 1871 if (unlikely(folio_put_testzero(folio))) { 1872 __folio_clear_lru_flags(folio); 1873 1874 folio_unqueue_deferred_split(folio); 1875 if (folio_batch_add(&free_folios, folio) == 0) { 1876 spin_unlock_irq(&lruvec->lru_lock); 1877 mem_cgroup_uncharge_folios(&free_folios); 1878 free_unref_folios(&free_folios); 1879 spin_lock_irq(&lruvec->lru_lock); 1880 } 1881 1882 continue; 1883 } 1884 1885 /* 1886 * All pages were isolated from the same lruvec (and isolation 1887 * inhibits memcg migration). 1888 */ 1889 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); 1890 lruvec_add_folio(lruvec, folio); 1891 nr_pages = folio_nr_pages(folio); 1892 nr_moved += nr_pages; 1893 if (folio_test_active(folio)) 1894 workingset_age_nonresident(lruvec, nr_pages); 1895 } 1896 1897 if (free_folios.nr) { 1898 spin_unlock_irq(&lruvec->lru_lock); 1899 mem_cgroup_uncharge_folios(&free_folios); 1900 free_unref_folios(&free_folios); 1901 spin_lock_irq(&lruvec->lru_lock); 1902 } 1903 1904 return nr_moved; 1905 } 1906 1907 /* 1908 * If a kernel thread (such as nfsd for loop-back mounts) services a backing 1909 * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case 1910 * we should not throttle. Otherwise it is safe to do so. 1911 */ 1912 static int current_may_throttle(void) 1913 { 1914 return !(current->flags & PF_LOCAL_THROTTLE); 1915 } 1916 1917 /* 1918 * shrink_inactive_list() is a helper for shrink_node(). It returns the number 1919 * of reclaimed pages 1920 */ 1921 static unsigned long shrink_inactive_list(unsigned long nr_to_scan, 1922 struct lruvec *lruvec, struct scan_control *sc, 1923 enum lru_list lru) 1924 { 1925 LIST_HEAD(folio_list); 1926 unsigned long nr_scanned; 1927 unsigned int nr_reclaimed = 0; 1928 unsigned long nr_taken; 1929 struct reclaim_stat stat; 1930 bool file = is_file_lru(lru); 1931 enum vm_event_item item; 1932 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 1933 bool stalled = false; 1934 1935 while (unlikely(too_many_isolated(pgdat, file, sc))) { 1936 if (stalled) 1937 return 0; 1938 1939 /* wait a bit for the reclaimer. */ 1940 stalled = true; 1941 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); 1942 1943 /* We are about to die and free our memory. Return now. 
*/ 1944 if (fatal_signal_pending(current)) 1945 return SWAP_CLUSTER_MAX; 1946 } 1947 1948 lru_add_drain(); 1949 1950 spin_lock_irq(&lruvec->lru_lock); 1951 1952 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list, 1953 &nr_scanned, sc, lru); 1954 1955 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 1956 item = PGSCAN_KSWAPD + reclaimer_offset(); 1957 if (!cgroup_reclaim(sc)) 1958 __count_vm_events(item, nr_scanned); 1959 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); 1960 __count_vm_events(PGSCAN_ANON + file, nr_scanned); 1961 1962 spin_unlock_irq(&lruvec->lru_lock); 1963 1964 if (nr_taken == 0) 1965 return 0; 1966 1967 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false); 1968 1969 spin_lock_irq(&lruvec->lru_lock); 1970 move_folios_to_lru(lruvec, &folio_list); 1971 1972 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), 1973 stat.nr_demoted); 1974 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 1975 item = PGSTEAL_KSWAPD + reclaimer_offset(); 1976 if (!cgroup_reclaim(sc)) 1977 __count_vm_events(item, nr_reclaimed); 1978 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); 1979 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); 1980 spin_unlock_irq(&lruvec->lru_lock); 1981 1982 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); 1983 1984 /* 1985 * If dirty folios are scanned that are not queued for IO, it 1986 * implies that flushers are not doing their job. This can 1987 * happen when memory pressure pushes dirty folios to the end of 1988 * the LRU before the dirty limits are breached and the dirty 1989 * data has expired. It can also happen when the proportion of 1990 * dirty folios grows not through writes but through memory 1991 * pressure reclaiming all the clean cache. And in some cases, 1992 * the flushers simply cannot keep up with the allocation 1993 * rate. Nudge the flusher threads in case they are asleep. 1994 */ 1995 if (stat.nr_unqueued_dirty == nr_taken) { 1996 wakeup_flusher_threads(WB_REASON_VMSCAN); 1997 /* 1998 * For cgroupv1 dirty throttling is achieved by waking up 1999 * the kernel flusher here and later waiting on folios 2000 * which are in writeback to finish (see shrink_folio_list()). 2001 * 2002 * Flusher may not be able to issue writeback quickly 2003 * enough for cgroupv1 writeback throttling to work 2004 * on a large system. 2005 */ 2006 if (!writeback_throttling_sane(sc)) 2007 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 2008 } 2009 2010 sc->nr.dirty += stat.nr_dirty; 2011 sc->nr.congested += stat.nr_congested; 2012 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 2013 sc->nr.writeback += stat.nr_writeback; 2014 sc->nr.immediate += stat.nr_immediate; 2015 sc->nr.taken += nr_taken; 2016 if (file) 2017 sc->nr.file_taken += nr_taken; 2018 2019 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 2020 nr_scanned, nr_reclaimed, &stat, sc->priority, file); 2021 return nr_reclaimed; 2022 } 2023 2024 /* 2025 * shrink_active_list() moves folios from the active LRU to the inactive LRU. 2026 * 2027 * We move them the other way if the folio is referenced by one or more 2028 * processes. 2029 * 2030 * If the folios are mostly unmapped, the processing is fast and it is 2031 * appropriate to hold lru_lock across the whole operation. But if 2032 * the folios are mapped, the processing is slow (folio_referenced()), so 2033 * we should drop lru_lock around each folio. 
It's impossible to balance 2034 * this, so instead we remove the folios from the LRU while processing them. 2035 * It is safe to rely on the active flag against the non-LRU folios in here 2036 * because nobody will play with that bit on a non-LRU folio. 2037 * 2038 * The downside is that we have to touch folio->_refcount against each folio. 2039 * But we had to alter folio->flags anyway. 2040 */ 2041 static void shrink_active_list(unsigned long nr_to_scan, 2042 struct lruvec *lruvec, 2043 struct scan_control *sc, 2044 enum lru_list lru) 2045 { 2046 unsigned long nr_taken; 2047 unsigned long nr_scanned; 2048 unsigned long vm_flags; 2049 LIST_HEAD(l_hold); /* The folios which were snipped off */ 2050 LIST_HEAD(l_active); 2051 LIST_HEAD(l_inactive); 2052 unsigned nr_deactivate, nr_activate; 2053 unsigned nr_rotated = 0; 2054 bool file = is_file_lru(lru); 2055 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2056 2057 lru_add_drain(); 2058 2059 spin_lock_irq(&lruvec->lru_lock); 2060 2061 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold, 2062 &nr_scanned, sc, lru); 2063 2064 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 2065 2066 if (!cgroup_reclaim(sc)) 2067 __count_vm_events(PGREFILL, nr_scanned); 2068 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); 2069 2070 spin_unlock_irq(&lruvec->lru_lock); 2071 2072 while (!list_empty(&l_hold)) { 2073 struct folio *folio; 2074 2075 cond_resched(); 2076 folio = lru_to_folio(&l_hold); 2077 list_del(&folio->lru); 2078 2079 if (unlikely(!folio_evictable(folio))) { 2080 folio_putback_lru(folio); 2081 continue; 2082 } 2083 2084 if (unlikely(buffer_heads_over_limit)) { 2085 if (folio_needs_release(folio) && 2086 folio_trylock(folio)) { 2087 filemap_release_folio(folio, 0); 2088 folio_unlock(folio); 2089 } 2090 } 2091 2092 /* Referenced or rmap lock contention: rotate */ 2093 if (folio_referenced(folio, 0, sc->target_mem_cgroup, 2094 &vm_flags) != 0) { 2095 /* 2096 * Identify referenced, file-backed active folios and 2097 * give them one more trip around the active list, so 2098 * that executable code gets a better chance to stay in 2099 * memory under moderate memory pressure. Anon folios 2100 * are not likely to be evicted by use-once streaming 2101 * IO, plus JVM can create lots of anon VM_EXEC folios, 2102 * so we ignore them here. 2103 */ 2104 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) { 2105 nr_rotated += folio_nr_pages(folio); 2106 list_add(&folio->lru, &l_active); 2107 continue; 2108 } 2109 } 2110 2111 folio_clear_active(folio); /* we are de-activating */ 2112 folio_set_workingset(folio); 2113 list_add(&folio->lru, &l_inactive); 2114 } 2115 2116 /* 2117 * Move folios back to the lru list.
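 * move_folios_to_lru() re-adds each folio according to its flags: the
 * l_active folios still have the active flag set and return to the active
 * list, while the l_inactive folios were deactivated above and land on the
 * inactive list. Folios whose last reference was dropped while they were
 * isolated are freed instead of being re-added.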
2118 */ 2119 spin_lock_irq(&lruvec->lru_lock); 2120 2121 nr_activate = move_folios_to_lru(lruvec, &l_active); 2122 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); 2123 2124 __count_vm_events(PGDEACTIVATE, nr_deactivate); 2125 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); 2126 2127 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2128 spin_unlock_irq(&lruvec->lru_lock); 2129 2130 if (nr_rotated) 2131 lru_note_cost(lruvec, file, 0, nr_rotated); 2132 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 2133 nr_deactivate, nr_rotated, sc->priority, file); 2134 } 2135 2136 static unsigned int reclaim_folio_list(struct list_head *folio_list, 2137 struct pglist_data *pgdat) 2138 { 2139 struct reclaim_stat stat; 2140 unsigned int nr_reclaimed; 2141 struct folio *folio; 2142 struct scan_control sc = { 2143 .gfp_mask = GFP_KERNEL, 2144 .may_writepage = 1, 2145 .may_unmap = 1, 2146 .may_swap = 1, 2147 .no_demotion = 1, 2148 }; 2149 2150 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &stat, true); 2151 while (!list_empty(folio_list)) { 2152 folio = lru_to_folio(folio_list); 2153 list_del(&folio->lru); 2154 folio_putback_lru(folio); 2155 } 2156 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat); 2157 2158 return nr_reclaimed; 2159 } 2160 2161 unsigned long reclaim_pages(struct list_head *folio_list) 2162 { 2163 int nid; 2164 unsigned int nr_reclaimed = 0; 2165 LIST_HEAD(node_folio_list); 2166 unsigned int noreclaim_flag; 2167 2168 if (list_empty(folio_list)) 2169 return nr_reclaimed; 2170 2171 noreclaim_flag = memalloc_noreclaim_save(); 2172 2173 nid = folio_nid(lru_to_folio(folio_list)); 2174 do { 2175 struct folio *folio = lru_to_folio(folio_list); 2176 2177 if (nid == folio_nid(folio)) { 2178 folio_clear_active(folio); 2179 list_move(&folio->lru, &node_folio_list); 2180 continue; 2181 } 2182 2183 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); 2184 nid = folio_nid(lru_to_folio(folio_list)); 2185 } while (!list_empty(folio_list)); 2186 2187 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); 2188 2189 memalloc_noreclaim_restore(noreclaim_flag); 2190 2191 return nr_reclaimed; 2192 } 2193 2194 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 2195 struct lruvec *lruvec, struct scan_control *sc) 2196 { 2197 if (is_active_lru(lru)) { 2198 if (sc->may_deactivate & (1 << is_file_lru(lru))) 2199 shrink_active_list(nr_to_scan, lruvec, sc, lru); 2200 else 2201 sc->skipped_deactivate = 1; 2202 return 0; 2203 } 2204 2205 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 2206 } 2207 2208 /* 2209 * The inactive anon list should be small enough that the VM never has 2210 * to do too much work. 2211 * 2212 * The inactive file list should be small enough to leave most memory 2213 * to the established workingset on the scan-resistant active list, 2214 * but large enough to avoid thrashing the aggregate readahead window. 2215 * 2216 * Both inactive lists should also be large enough that each inactive 2217 * folio has a chance to be referenced again before it is reclaimed. 2218 * 2219 * If that fails and refaulting is observed, the inactive list grows. 2220 * 2221 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios 2222 * on this LRU, maintained by the pageout code. An inactive_ratio 2223 * of 3 means 3:1 or 25% of the folios are kept on the inactive list. 
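 * (These targets come from inactive_is_low() below: gb is the combined size
 * of the inactive and active lists in gigabytes and the ratio is
 * int_sqrt(10 * gb), so e.g. 1GB of folios gives int_sqrt(10) = 3 and 100GB
 * gives int_sqrt(1000) = 31.)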
2224 * 2225 * total target max 2226 * memory ratio inactive 2227 * ------------------------------------- 2228 * 10MB 1 5MB 2229 * 100MB 1 50MB 2230 * 1GB 3 250MB 2231 * 10GB 10 0.9GB 2232 * 100GB 31 3GB 2233 * 1TB 101 10GB 2234 * 10TB 320 32GB 2235 */ 2236 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) 2237 { 2238 enum lru_list active_lru = inactive_lru + LRU_ACTIVE; 2239 unsigned long inactive, active; 2240 unsigned long inactive_ratio; 2241 unsigned long gb; 2242 2243 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); 2244 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); 2245 2246 gb = (inactive + active) >> (30 - PAGE_SHIFT); 2247 if (gb) 2248 inactive_ratio = int_sqrt(10 * gb); 2249 else 2250 inactive_ratio = 1; 2251 2252 return inactive * inactive_ratio < active; 2253 } 2254 2255 enum scan_balance { 2256 SCAN_EQUAL, 2257 SCAN_FRACT, 2258 SCAN_ANON, 2259 SCAN_FILE, 2260 }; 2261 2262 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) 2263 { 2264 unsigned long file; 2265 struct lruvec *target_lruvec; 2266 2267 if (lru_gen_enabled()) 2268 return; 2269 2270 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 2271 2272 /* 2273 * Flush the memory cgroup stats in rate-limited way as we don't need 2274 * most accurate stats here. We may switch to regular stats flushing 2275 * in the future once it is cheap enough. 2276 */ 2277 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup); 2278 2279 /* 2280 * Determine the scan balance between anon and file LRUs. 2281 */ 2282 spin_lock_irq(&target_lruvec->lru_lock); 2283 sc->anon_cost = target_lruvec->anon_cost; 2284 sc->file_cost = target_lruvec->file_cost; 2285 spin_unlock_irq(&target_lruvec->lru_lock); 2286 2287 /* 2288 * Target desirable inactive:active list ratios for the anon 2289 * and file LRU lists. 2290 */ 2291 if (!sc->force_deactivate) { 2292 unsigned long refaults; 2293 2294 /* 2295 * When refaults are being observed, it means a new 2296 * workingset is being established. Deactivate to get 2297 * rid of any stale active pages quickly. 2298 */ 2299 refaults = lruvec_page_state(target_lruvec, 2300 WORKINGSET_ACTIVATE_ANON); 2301 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || 2302 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) 2303 sc->may_deactivate |= DEACTIVATE_ANON; 2304 else 2305 sc->may_deactivate &= ~DEACTIVATE_ANON; 2306 2307 refaults = lruvec_page_state(target_lruvec, 2308 WORKINGSET_ACTIVATE_FILE); 2309 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || 2310 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) 2311 sc->may_deactivate |= DEACTIVATE_FILE; 2312 else 2313 sc->may_deactivate &= ~DEACTIVATE_FILE; 2314 } else 2315 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; 2316 2317 /* 2318 * If we have plenty of inactive file pages that aren't 2319 * thrashing, try to reclaim those first before touching 2320 * anonymous pages. 2321 */ 2322 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); 2323 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) && 2324 !sc->no_cache_trim_mode) 2325 sc->cache_trim_mode = 1; 2326 else 2327 sc->cache_trim_mode = 0; 2328 2329 /* 2330 * Prevent the reclaimer from falling into the cache trap: as 2331 * cache pages start out inactive, every cache fault will tip 2332 * the scan balance towards the file LRU. And as the file LRU 2333 * shrinks, so does the window for rotation from references. 
2334 * This means we have a runaway feedback loop where a tiny 2335 * thrashing file LRU becomes infinitely more attractive than 2336 * anon pages. Try to detect this based on file LRU size. 2337 */ 2338 if (!cgroup_reclaim(sc)) { 2339 unsigned long total_high_wmark = 0; 2340 unsigned long free, anon; 2341 int z; 2342 2343 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); 2344 file = node_page_state(pgdat, NR_ACTIVE_FILE) + 2345 node_page_state(pgdat, NR_INACTIVE_FILE); 2346 2347 for (z = 0; z < MAX_NR_ZONES; z++) { 2348 struct zone *zone = &pgdat->node_zones[z]; 2349 2350 if (!managed_zone(zone)) 2351 continue; 2352 2353 total_high_wmark += high_wmark_pages(zone); 2354 } 2355 2356 /* 2357 * Consider anon: if that's low too, this isn't a 2358 * runaway file reclaim problem, but rather just 2359 * extreme pressure. Reclaim as per usual then. 2360 */ 2361 anon = node_page_state(pgdat, NR_INACTIVE_ANON); 2362 2363 sc->file_is_tiny = 2364 file + free <= total_high_wmark && 2365 !(sc->may_deactivate & DEACTIVATE_ANON) && 2366 anon >> sc->priority; 2367 } 2368 } 2369 2370 /* 2371 * Determine how aggressively the anon and file LRU lists should be 2372 * scanned. 2373 * 2374 * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan 2375 * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan 2376 */ 2377 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, 2378 unsigned long *nr) 2379 { 2380 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2381 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2382 unsigned long anon_cost, file_cost, total_cost; 2383 int swappiness = sc_swappiness(sc, memcg); 2384 u64 fraction[ANON_AND_FILE]; 2385 u64 denominator = 0; /* gcc */ 2386 enum scan_balance scan_balance; 2387 unsigned long ap, fp; 2388 enum lru_list lru; 2389 2390 /* If we have no swap space, do not bother scanning anon folios. */ 2391 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { 2392 scan_balance = SCAN_FILE; 2393 goto out; 2394 } 2395 2396 /* 2397 * Global reclaim will swap to prevent OOM even with no 2398 * swappiness, but memcg users want to use this knob to 2399 * disable swapping for individual groups completely when 2400 * using the memory controller's swap limit feature would be 2401 * too expensive. 2402 */ 2403 if (cgroup_reclaim(sc) && !swappiness) { 2404 scan_balance = SCAN_FILE; 2405 goto out; 2406 } 2407 2408 /* 2409 * Do not apply any pressure balancing cleverness when the 2410 * system is close to OOM, scan both anon and file equally 2411 * (unless the swappiness setting disagrees with swapping). 2412 */ 2413 if (!sc->priority && swappiness) { 2414 scan_balance = SCAN_EQUAL; 2415 goto out; 2416 } 2417 2418 /* 2419 * If the system is almost out of file pages, force-scan anon. 2420 */ 2421 if (sc->file_is_tiny) { 2422 scan_balance = SCAN_ANON; 2423 goto out; 2424 } 2425 2426 /* 2427 * If there is enough inactive page cache, we do not reclaim 2428 * anything from the anonymous working right now. 2429 */ 2430 if (sc->cache_trim_mode) { 2431 scan_balance = SCAN_FILE; 2432 goto out; 2433 } 2434 2435 scan_balance = SCAN_FRACT; 2436 /* 2437 * Calculate the pressure balance between anon and file pages. 
2438 * 2439 * The amount of pressure we put on each LRU is inversely 2440 * proportional to the cost of reclaiming each list, as 2441 * determined by the share of pages that are refaulting, times 2442 * the relative IO cost of bringing back a swapped out 2443 * anonymous page vs reloading a filesystem page (swappiness). 2444 * 2445 * Although we limit that influence to ensure no list gets 2446 * left behind completely: at least a third of the pressure is 2447 * applied, before swappiness. 2448 * 2449 * With swappiness at 100, anon and file have equal IO cost. 2450 */ 2451 total_cost = sc->anon_cost + sc->file_cost; 2452 anon_cost = total_cost + sc->anon_cost; 2453 file_cost = total_cost + sc->file_cost; 2454 total_cost = anon_cost + file_cost; 2455 2456 ap = swappiness * (total_cost + 1); 2457 ap /= anon_cost + 1; 2458 2459 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); 2460 fp /= file_cost + 1; 2461 2462 fraction[0] = ap; 2463 fraction[1] = fp; 2464 denominator = ap + fp; 2465 out: 2466 for_each_evictable_lru(lru) { 2467 bool file = is_file_lru(lru); 2468 unsigned long lruvec_size; 2469 unsigned long low, min; 2470 unsigned long scan; 2471 2472 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); 2473 mem_cgroup_protection(sc->target_mem_cgroup, memcg, 2474 &min, &low); 2475 2476 if (min || low) { 2477 /* 2478 * Scale a cgroup's reclaim pressure by proportioning 2479 * its current usage to its memory.low or memory.min 2480 * setting. 2481 * 2482 * This is important, as otherwise scanning aggression 2483 * becomes extremely binary -- from nothing as we 2484 * approach the memory protection threshold, to totally 2485 * nominal as we exceed it. This results in requiring 2486 * setting extremely liberal protection thresholds. It 2487 * also means we simply get no protection at all if we 2488 * set it too low, which is not ideal. 2489 * 2490 * If there is any protection in place, we reduce scan 2491 * pressure by how much of the total memory used is 2492 * within protection thresholds. 2493 * 2494 * There is one special case: in the first reclaim pass, 2495 * we skip over all groups that are within their low 2496 * protection. If that fails to reclaim enough pages to 2497 * satisfy the reclaim goal, we come back and override 2498 * the best-effort low protection. However, we still 2499 * ideally want to honor how well-behaved groups are in 2500 * that case instead of simply punishing them all 2501 * equally. As such, we reclaim them based on how much 2502 * memory they are using, reducing the scan pressure 2503 * again by how much of the total memory used is under 2504 * hard protection. 2505 */ 2506 unsigned long cgroup_size = mem_cgroup_size(memcg); 2507 unsigned long protection; 2508 2509 /* memory.low scaling, make sure we retry before OOM */ 2510 if (!sc->memcg_low_reclaim && low > min) { 2511 protection = low; 2512 sc->memcg_low_skipped = 1; 2513 } else { 2514 protection = min; 2515 } 2516 2517 /* Avoid TOCTOU with earlier protection check */ 2518 cgroup_size = max(cgroup_size, protection); 2519 2520 scan = lruvec_size - lruvec_size * protection / 2521 (cgroup_size + 1); 2522 2523 /* 2524 * Minimally target SWAP_CLUSTER_MAX pages to keep 2525 * reclaim moving forwards, avoiding decrementing 2526 * sc->priority further than desirable. 2527 */ 2528 scan = max(scan, SWAP_CLUSTER_MAX); 2529 } else { 2530 scan = lruvec_size; 2531 } 2532 2533 scan >>= sc->priority; 2534 2535 /* 2536 * If the cgroup's already been deleted, make sure to 2537 * scrape out the remaining cache. 
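 * (An offlined memcg cannot grow again, so there is little point in
 * scanning it gently.)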
2538 */ 2539 if (!scan && !mem_cgroup_online(memcg)) 2540 scan = min(lruvec_size, SWAP_CLUSTER_MAX); 2541 2542 switch (scan_balance) { 2543 case SCAN_EQUAL: 2544 /* Scan lists relative to size */ 2545 break; 2546 case SCAN_FRACT: 2547 /* 2548 * Scan types proportional to swappiness and 2549 * their relative recent reclaim efficiency. 2550 * Make sure we don't miss the last page on 2551 * the offlined memory cgroups because of a 2552 * round-off error. 2553 */ 2554 scan = mem_cgroup_online(memcg) ? 2555 div64_u64(scan * fraction[file], denominator) : 2556 DIV64_U64_ROUND_UP(scan * fraction[file], 2557 denominator); 2558 break; 2559 case SCAN_FILE: 2560 case SCAN_ANON: 2561 /* Scan one type exclusively */ 2562 if ((scan_balance == SCAN_FILE) != file) 2563 scan = 0; 2564 break; 2565 default: 2566 /* Look ma, no brain */ 2567 BUG(); 2568 } 2569 2570 nr[lru] = scan; 2571 } 2572 } 2573 2574 /* 2575 * Anonymous LRU management is a waste if there is 2576 * ultimately no way to reclaim the memory. 2577 */ 2578 static bool can_age_anon_pages(struct pglist_data *pgdat, 2579 struct scan_control *sc) 2580 { 2581 /* Aging the anon LRU is valuable if swap is present: */ 2582 if (total_swap_pages > 0) 2583 return true; 2584 2585 /* Also valuable if anon pages can be demoted: */ 2586 return can_demote(pgdat->node_id, sc); 2587 } 2588 2589 #ifdef CONFIG_LRU_GEN 2590 2591 #ifdef CONFIG_LRU_GEN_ENABLED 2592 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); 2593 #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap]) 2594 #else 2595 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); 2596 #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) 2597 #endif 2598 2599 static bool should_walk_mmu(void) 2600 { 2601 return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); 2602 } 2603 2604 static bool should_clear_pmd_young(void) 2605 { 2606 return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); 2607 } 2608 2609 /****************************************************************************** 2610 * shorthand helpers 2611 ******************************************************************************/ 2612 2613 #define DEFINE_MAX_SEQ(lruvec) \ 2614 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) 2615 2616 #define DEFINE_MIN_SEQ(lruvec) \ 2617 unsigned long min_seq[ANON_AND_FILE] = { \ 2618 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ 2619 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ 2620 } 2621 2622 #define for_each_gen_type_zone(gen, type, zone) \ 2623 for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ 2624 for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ 2625 for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) 2626 2627 #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS) 2628 #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS) 2629 2630 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) 2631 { 2632 struct pglist_data *pgdat = NODE_DATA(nid); 2633 2634 #ifdef CONFIG_MEMCG 2635 if (memcg) { 2636 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; 2637 2638 /* see the comment in mem_cgroup_lruvec() */ 2639 if (!lruvec->pgdat) 2640 lruvec->pgdat = pgdat; 2641 2642 return lruvec; 2643 } 2644 #endif 2645 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2646 2647 return &pgdat->__lruvec; 2648 } 2649 2650 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) 2651 { 2652 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2653 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2654 2655 if (!sc->may_swap) 2656 return 
0; 2657 2658 if (!can_demote(pgdat->node_id, sc) && 2659 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) 2660 return 0; 2661 2662 return sc_swappiness(sc, memcg); 2663 } 2664 2665 static int get_nr_gens(struct lruvec *lruvec, int type) 2666 { 2667 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; 2668 } 2669 2670 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) 2671 { 2672 /* see the comment on lru_gen_folio */ 2673 return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && 2674 get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && 2675 get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; 2676 } 2677 2678 /****************************************************************************** 2679 * Bloom filters 2680 ******************************************************************************/ 2681 2682 /* 2683 * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when 2684 * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of 2685 * bits in a bitmap, k is the number of hash functions and n is the number of 2686 * inserted items. 2687 * 2688 * Page table walkers use one of the two filters to reduce their search space. 2689 * To get rid of non-leaf entries that no longer have enough leaf entries, the 2690 * aging uses the double-buffering technique to flip to the other filter each 2691 * time it produces a new generation. For non-leaf entries that have enough 2692 * leaf entries, the aging carries them over to the next generation in 2693 * walk_pmd_range(); the eviction also reports them when walking the rmap 2694 * in lru_gen_look_around(). 2695 * 2696 * For future optimizations: 2697 * 1. It's not necessary to keep both filters all the time. The spare one can be 2698 * freed after the RCU grace period and reallocated if needed again. 2699 * 2. And when reallocating, it's worth scaling its size according to the number 2700 * of inserted entries in the other filter, to reduce the memory overhead on 2701 * small systems and false positives on large systems. 2702 * 3. Jenkins' hash function is an alternative to Knuth's.
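 *
 * As a rough illustration of the sizing above: each filter is
 * BIT(BLOOM_FILTER_SHIFT) == 32768 bits, i.e. 4KB, so the two filters cost at
 * most 8KB per lru_gen_mm_state; get_item_key() splits one 30-bit hash_ptr()
 * value into the two 15-bit bit indices.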
2703 */ 2704 #define BLOOM_FILTER_SHIFT 15 2705 2706 static inline int filter_gen_from_seq(unsigned long seq) 2707 { 2708 return seq % NR_BLOOM_FILTERS; 2709 } 2710 2711 static void get_item_key(void *item, int *key) 2712 { 2713 u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2); 2714 2715 BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); 2716 2717 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); 2718 key[1] = hash >> BLOOM_FILTER_SHIFT; 2719 } 2720 2721 static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2722 void *item) 2723 { 2724 int key[2]; 2725 unsigned long *filter; 2726 int gen = filter_gen_from_seq(seq); 2727 2728 filter = READ_ONCE(mm_state->filters[gen]); 2729 if (!filter) 2730 return true; 2731 2732 get_item_key(item, key); 2733 2734 return test_bit(key[0], filter) && test_bit(key[1], filter); 2735 } 2736 2737 static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2738 void *item) 2739 { 2740 int key[2]; 2741 unsigned long *filter; 2742 int gen = filter_gen_from_seq(seq); 2743 2744 filter = READ_ONCE(mm_state->filters[gen]); 2745 if (!filter) 2746 return; 2747 2748 get_item_key(item, key); 2749 2750 if (!test_bit(key[0], filter)) 2751 set_bit(key[0], filter); 2752 if (!test_bit(key[1], filter)) 2753 set_bit(key[1], filter); 2754 } 2755 2756 static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq) 2757 { 2758 unsigned long *filter; 2759 int gen = filter_gen_from_seq(seq); 2760 2761 filter = mm_state->filters[gen]; 2762 if (filter) { 2763 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT)); 2764 return; 2765 } 2766 2767 filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), 2768 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 2769 WRITE_ONCE(mm_state->filters[gen], filter); 2770 } 2771 2772 /****************************************************************************** 2773 * mm_struct list 2774 ******************************************************************************/ 2775 2776 #ifdef CONFIG_LRU_GEN_WALKS_MMU 2777 2778 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2779 { 2780 static struct lru_gen_mm_list mm_list = { 2781 .fifo = LIST_HEAD_INIT(mm_list.fifo), 2782 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), 2783 }; 2784 2785 #ifdef CONFIG_MEMCG 2786 if (memcg) 2787 return &memcg->mm_list; 2788 #endif 2789 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2790 2791 return &mm_list; 2792 } 2793 2794 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2795 { 2796 return &lruvec->mm_state; 2797 } 2798 2799 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2800 { 2801 int key; 2802 struct mm_struct *mm; 2803 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 2804 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 2805 2806 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); 2807 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); 2808 2809 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) 2810 return NULL; 2811 2812 clear_bit(key, &mm->lru_gen.bitmap); 2813 2814 return mmget_not_zero(mm) ? 
mm : NULL; 2815 } 2816 2817 void lru_gen_add_mm(struct mm_struct *mm) 2818 { 2819 int nid; 2820 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); 2821 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2822 2823 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); 2824 #ifdef CONFIG_MEMCG 2825 VM_WARN_ON_ONCE(mm->lru_gen.memcg); 2826 mm->lru_gen.memcg = memcg; 2827 #endif 2828 spin_lock(&mm_list->lock); 2829 2830 for_each_node_state(nid, N_MEMORY) { 2831 struct lruvec *lruvec = get_lruvec(memcg, nid); 2832 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2833 2834 /* the first addition since the last iteration */ 2835 if (mm_state->tail == &mm_list->fifo) 2836 mm_state->tail = &mm->lru_gen.list; 2837 } 2838 2839 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); 2840 2841 spin_unlock(&mm_list->lock); 2842 } 2843 2844 void lru_gen_del_mm(struct mm_struct *mm) 2845 { 2846 int nid; 2847 struct lru_gen_mm_list *mm_list; 2848 struct mem_cgroup *memcg = NULL; 2849 2850 if (list_empty(&mm->lru_gen.list)) 2851 return; 2852 2853 #ifdef CONFIG_MEMCG 2854 memcg = mm->lru_gen.memcg; 2855 #endif 2856 mm_list = get_mm_list(memcg); 2857 2858 spin_lock(&mm_list->lock); 2859 2860 for_each_node(nid) { 2861 struct lruvec *lruvec = get_lruvec(memcg, nid); 2862 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2863 2864 /* where the current iteration continues after */ 2865 if (mm_state->head == &mm->lru_gen.list) 2866 mm_state->head = mm_state->head->prev; 2867 2868 /* where the last iteration ended before */ 2869 if (mm_state->tail == &mm->lru_gen.list) 2870 mm_state->tail = mm_state->tail->next; 2871 } 2872 2873 list_del_init(&mm->lru_gen.list); 2874 2875 spin_unlock(&mm_list->lock); 2876 2877 #ifdef CONFIG_MEMCG 2878 mem_cgroup_put(mm->lru_gen.memcg); 2879 mm->lru_gen.memcg = NULL; 2880 #endif 2881 } 2882 2883 #ifdef CONFIG_MEMCG 2884 void lru_gen_migrate_mm(struct mm_struct *mm) 2885 { 2886 struct mem_cgroup *memcg; 2887 struct task_struct *task = rcu_dereference_protected(mm->owner, true); 2888 2889 VM_WARN_ON_ONCE(task->mm != mm); 2890 lockdep_assert_held(&task->alloc_lock); 2891 2892 /* for mm_update_next_owner() */ 2893 if (mem_cgroup_disabled()) 2894 return; 2895 2896 /* migration can happen before addition */ 2897 if (!mm->lru_gen.memcg) 2898 return; 2899 2900 rcu_read_lock(); 2901 memcg = mem_cgroup_from_task(task); 2902 rcu_read_unlock(); 2903 if (memcg == mm->lru_gen.memcg) 2904 return; 2905 2906 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); 2907 2908 lru_gen_del_mm(mm); 2909 lru_gen_add_mm(mm); 2910 } 2911 #endif 2912 2913 #else /* !CONFIG_LRU_GEN_WALKS_MMU */ 2914 2915 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2916 { 2917 return NULL; 2918 } 2919 2920 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2921 { 2922 return NULL; 2923 } 2924 2925 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2926 { 2927 return NULL; 2928 } 2929 2930 #endif 2931 2932 static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last) 2933 { 2934 int i; 2935 int hist; 2936 struct lruvec *lruvec = walk->lruvec; 2937 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2938 2939 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); 2940 2941 hist = lru_hist_from_seq(walk->seq); 2942 2943 for (i = 0; i < NR_MM_STATS; i++) { 2944 WRITE_ONCE(mm_state->stats[hist][i], 2945 mm_state->stats[hist][i] + walk->mm_stats[i]); 2946 walk->mm_stats[i] = 0; 2947 } 2948 2949 if (NR_HIST_GENS > 1 && last) { 2950 hist = 
lru_hist_from_seq(walk->seq + 1); 2951 2952 for (i = 0; i < NR_MM_STATS; i++) 2953 WRITE_ONCE(mm_state->stats[hist][i], 0); 2954 } 2955 } 2956 2957 static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter) 2958 { 2959 bool first = false; 2960 bool last = false; 2961 struct mm_struct *mm = NULL; 2962 struct lruvec *lruvec = walk->lruvec; 2963 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2964 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2965 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2966 2967 /* 2968 * mm_state->seq is incremented after each iteration of mm_list. There 2969 * are three interesting cases for this page table walker: 2970 * 1. It tries to start a new iteration with a stale max_seq: there is 2971 * nothing left to do. 2972 * 2. It started the next iteration: it needs to reset the Bloom filter 2973 * so that a fresh set of PTE tables can be recorded. 2974 * 3. It ended the current iteration: it needs to reset the mm stats 2975 * counters and tell its caller to increment max_seq. 2976 */ 2977 spin_lock(&mm_list->lock); 2978 2979 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq); 2980 2981 if (walk->seq <= mm_state->seq) 2982 goto done; 2983 2984 if (!mm_state->head) 2985 mm_state->head = &mm_list->fifo; 2986 2987 if (mm_state->head == &mm_list->fifo) 2988 first = true; 2989 2990 do { 2991 mm_state->head = mm_state->head->next; 2992 if (mm_state->head == &mm_list->fifo) { 2993 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 2994 last = true; 2995 break; 2996 } 2997 2998 /* force scan for those added after the last iteration */ 2999 if (!mm_state->tail || mm_state->tail == mm_state->head) { 3000 mm_state->tail = mm_state->head->next; 3001 walk->force_scan = true; 3002 } 3003 } while (!(mm = get_next_mm(walk))); 3004 done: 3005 if (*iter || last) 3006 reset_mm_stats(walk, last); 3007 3008 spin_unlock(&mm_list->lock); 3009 3010 if (mm && first) 3011 reset_bloom_filter(mm_state, walk->seq + 1); 3012 3013 if (*iter) 3014 mmput_async(*iter); 3015 3016 *iter = mm; 3017 3018 return last; 3019 } 3020 3021 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq) 3022 { 3023 bool success = false; 3024 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3025 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 3026 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 3027 3028 spin_lock(&mm_list->lock); 3029 3030 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq); 3031 3032 if (seq > mm_state->seq) { 3033 mm_state->head = NULL; 3034 mm_state->tail = NULL; 3035 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 3036 success = true; 3037 } 3038 3039 spin_unlock(&mm_list->lock); 3040 3041 return success; 3042 } 3043 3044 /****************************************************************************** 3045 * PID controller 3046 ******************************************************************************/ 3047 3048 /* 3049 * A feedback loop based on Proportional-Integral-Derivative (PID) controller. 3050 * 3051 * The P term is refaulted/(evicted+protected) from a tier in the generation 3052 * currently being evicted; the I term is the exponential moving average of the 3053 * P term over the generations previously evicted, using the smoothing factor 3054 * 1/2; the D term isn't supported. 3055 * 3056 * The setpoint (SP) is always the first tier of one type; the process variable 3057 * (PV) is either any tier of the other type or any other tier of the same 3058 * type. 
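 * (For instance, when the type is file, the SP is file tier 0 and the PV may
 * be any anon tier or a higher file tier.)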
3059 * 3060 * The error is the difference between the SP and the PV; the correction is to 3061 * turn off protection when SP>PV or turn on protection when SP<PV. 3062 * 3063 * For future optimizations: 3064 * 1. The D term may discount the other two terms over time so that long-lived 3065 * generations can resist stale information. 3066 */ 3067 struct ctrl_pos { 3068 unsigned long refaulted; 3069 unsigned long total; 3070 int gain; 3071 }; 3072 3073 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, 3074 struct ctrl_pos *pos) 3075 { 3076 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3077 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 3078 3079 pos->refaulted = lrugen->avg_refaulted[type][tier] + 3080 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3081 pos->total = lrugen->avg_total[type][tier] + 3082 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3083 if (tier) 3084 pos->total += lrugen->protected[hist][type][tier - 1]; 3085 pos->gain = gain; 3086 } 3087 3088 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) 3089 { 3090 int hist, tier; 3091 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3092 bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; 3093 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; 3094 3095 lockdep_assert_held(&lruvec->lru_lock); 3096 3097 if (!carryover && !clear) 3098 return; 3099 3100 hist = lru_hist_from_seq(seq); 3101 3102 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 3103 if (carryover) { 3104 unsigned long sum; 3105 3106 sum = lrugen->avg_refaulted[type][tier] + 3107 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3108 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); 3109 3110 sum = lrugen->avg_total[type][tier] + 3111 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3112 if (tier) 3113 sum += lrugen->protected[hist][type][tier - 1]; 3114 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); 3115 } 3116 3117 if (clear) { 3118 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); 3119 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); 3120 if (tier) 3121 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); 3122 } 3123 } 3124 } 3125 3126 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) 3127 { 3128 /* 3129 * Return true if the PV has a limited number of refaults or a lower 3130 * refaulted/total than the SP. 3131 */ 3132 return pv->refaulted < MIN_LRU_BATCH || 3133 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= 3134 (sp->refaulted + 1) * pv->total * pv->gain; 3135 } 3136 3137 /****************************************************************************** 3138 * the aging 3139 ******************************************************************************/ 3140 3141 /* promote pages accessed through page tables */ 3142 static int folio_update_gen(struct folio *folio, int gen) 3143 { 3144 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3145 3146 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); 3147 3148 do { 3149 /* lru_gen_del_folio() has isolated this page? 
*/ 3150 if (!(old_flags & LRU_GEN_MASK)) { 3151 /* for shrink_folio_list() */ 3152 new_flags = old_flags | BIT(PG_referenced); 3153 continue; 3154 } 3155 3156 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3157 new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; 3158 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3159 3160 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3161 } 3162 3163 /* protect pages accessed multiple times through file descriptors */ 3164 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) 3165 { 3166 int type = folio_is_file_lru(folio); 3167 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3168 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3169 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3170 3171 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); 3172 3173 do { 3174 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3175 /* folio_update_gen() has promoted this page? */ 3176 if (new_gen >= 0 && new_gen != old_gen) 3177 return new_gen; 3178 3179 new_gen = (old_gen + 1) % MAX_NR_GENS; 3180 3181 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3182 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; 3183 /* for folio_end_writeback() */ 3184 if (reclaiming) 3185 new_flags |= BIT(PG_reclaim); 3186 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3187 3188 lru_gen_update_size(lruvec, folio, old_gen, new_gen); 3189 3190 return new_gen; 3191 } 3192 3193 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, 3194 int old_gen, int new_gen) 3195 { 3196 int type = folio_is_file_lru(folio); 3197 int zone = folio_zonenum(folio); 3198 int delta = folio_nr_pages(folio); 3199 3200 VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS); 3201 VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS); 3202 3203 walk->batched++; 3204 3205 walk->nr_pages[old_gen][type][zone] -= delta; 3206 walk->nr_pages[new_gen][type][zone] += delta; 3207 } 3208 3209 static void reset_batch_size(struct lru_gen_mm_walk *walk) 3210 { 3211 int gen, type, zone; 3212 struct lruvec *lruvec = walk->lruvec; 3213 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3214 3215 walk->batched = 0; 3216 3217 for_each_gen_type_zone(gen, type, zone) { 3218 enum lru_list lru = type * LRU_INACTIVE_FILE; 3219 int delta = walk->nr_pages[gen][type][zone]; 3220 3221 if (!delta) 3222 continue; 3223 3224 walk->nr_pages[gen][type][zone] = 0; 3225 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], 3226 lrugen->nr_pages[gen][type][zone] + delta); 3227 3228 if (lru_gen_is_active(lruvec, gen)) 3229 lru += LRU_ACTIVE; 3230 __update_lru_size(lruvec, lru, zone, delta); 3231 } 3232 } 3233 3234 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args) 3235 { 3236 struct address_space *mapping; 3237 struct vm_area_struct *vma = args->vma; 3238 struct lru_gen_mm_walk *walk = args->private; 3239 3240 if (!vma_is_accessible(vma)) 3241 return true; 3242 3243 if (is_vm_hugetlb_page(vma)) 3244 return true; 3245 3246 if (!vma_has_recency(vma)) 3247 return true; 3248 3249 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) 3250 return true; 3251 3252 if (vma == get_gate_vma(vma->vm_mm)) 3253 return true; 3254 3255 if (vma_is_anonymous(vma)) 3256 return !walk->can_swap; 3257 3258 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) 3259 return true; 3260 3261 mapping = vma->vm_file->f_mapping; 3262 if (mapping_unevictable(mapping)) 3263 return true; 3264 3265 if 
(shmem_mapping(mapping)) 3266 return !walk->can_swap; 3267 3268 /* to exclude special mappings like dax, etc. */ 3269 return !mapping->a_ops->read_folio; 3270 } 3271 3272 /* 3273 * Some userspace memory allocators map many single-page VMAs. Instead of 3274 * returning back to the PGD table for each of such VMAs, finish an entire PMD 3275 * table to reduce zigzags and improve cache performance. 3276 */ 3277 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args, 3278 unsigned long *vm_start, unsigned long *vm_end) 3279 { 3280 unsigned long start = round_up(*vm_end, size); 3281 unsigned long end = (start | ~mask) + 1; 3282 VMA_ITERATOR(vmi, args->mm, start); 3283 3284 VM_WARN_ON_ONCE(mask & size); 3285 VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask)); 3286 3287 for_each_vma(vmi, args->vma) { 3288 if (end && end <= args->vma->vm_start) 3289 return false; 3290 3291 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) 3292 continue; 3293 3294 *vm_start = max(start, args->vma->vm_start); 3295 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; 3296 3297 return true; 3298 } 3299 3300 return false; 3301 } 3302 3303 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr, 3304 struct pglist_data *pgdat) 3305 { 3306 unsigned long pfn = pte_pfn(pte); 3307 3308 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3309 3310 if (!pte_present(pte) || is_zero_pfn(pfn)) 3311 return -1; 3312 3313 if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte))) 3314 return -1; 3315 3316 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm)) 3317 return -1; 3318 3319 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3320 return -1; 3321 3322 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3323 return -1; 3324 3325 return pfn; 3326 } 3327 3328 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr, 3329 struct pglist_data *pgdat) 3330 { 3331 unsigned long pfn = pmd_pfn(pmd); 3332 3333 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3334 3335 if (!pmd_present(pmd) || is_huge_zero_pmd(pmd)) 3336 return -1; 3337 3338 if (WARN_ON_ONCE(pmd_devmap(pmd))) 3339 return -1; 3340 3341 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm)) 3342 return -1; 3343 3344 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3345 return -1; 3346 3347 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3348 return -1; 3349 3350 return pfn; 3351 } 3352 3353 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, 3354 struct pglist_data *pgdat, bool can_swap) 3355 { 3356 struct folio *folio; 3357 3358 folio = pfn_folio(pfn); 3359 if (folio_nid(folio) != pgdat->node_id) 3360 return NULL; 3361 3362 if (folio_memcg(folio) != memcg) 3363 return NULL; 3364 3365 /* file VMAs can contain anon pages from COW */ 3366 if (!folio_is_file_lru(folio) && !can_swap) 3367 return NULL; 3368 3369 return folio; 3370 } 3371 3372 static bool suitable_to_scan(int total, int young) 3373 { 3374 int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8); 3375 3376 /* suitable if the average number of young PTEs per cacheline is >=1 */ 3377 return young * n >= total; 3378 } 3379 3380 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, 3381 struct mm_walk *args) 3382 { 3383 int i; 3384 pte_t *pte; 3385 spinlock_t *ptl; 3386 unsigned long addr; 3387 int total = 0; 3388 int young = 0; 3389 struct lru_gen_mm_walk *walk = args->private; 3390 struct mem_cgroup *memcg = 
lruvec_memcg(walk->lruvec); 3391 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3392 DEFINE_MAX_SEQ(walk->lruvec); 3393 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3394 pmd_t pmdval; 3395 3396 pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, 3397 &ptl); 3398 if (!pte) 3399 return false; 3400 if (!spin_trylock(ptl)) { 3401 pte_unmap(pte); 3402 return false; 3403 } 3404 3405 if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) { 3406 pte_unmap_unlock(pte, ptl); 3407 return false; 3408 } 3409 3410 arch_enter_lazy_mmu_mode(); 3411 restart: 3412 for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { 3413 unsigned long pfn; 3414 struct folio *folio; 3415 pte_t ptent = ptep_get(pte + i); 3416 3417 total++; 3418 walk->mm_stats[MM_LEAF_TOTAL]++; 3419 3420 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); 3421 if (pfn == -1) 3422 continue; 3423 3424 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3425 if (!folio) 3426 continue; 3427 3428 if (!ptep_clear_young_notify(args->vma, addr, pte + i)) 3429 continue; 3430 3431 young++; 3432 walk->mm_stats[MM_LEAF_YOUNG]++; 3433 3434 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 3435 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3436 !folio_test_swapcache(folio))) 3437 folio_mark_dirty(folio); 3438 3439 old_gen = folio_update_gen(folio, new_gen); 3440 if (old_gen >= 0 && old_gen != new_gen) 3441 update_batch_size(walk, folio, old_gen, new_gen); 3442 } 3443 3444 if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) 3445 goto restart; 3446 3447 arch_leave_lazy_mmu_mode(); 3448 pte_unmap_unlock(pte, ptl); 3449 3450 return suitable_to_scan(total, young); 3451 } 3452 3453 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, 3454 struct mm_walk *args, unsigned long *bitmap, unsigned long *first) 3455 { 3456 int i; 3457 pmd_t *pmd; 3458 spinlock_t *ptl; 3459 struct lru_gen_mm_walk *walk = args->private; 3460 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); 3461 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3462 DEFINE_MAX_SEQ(walk->lruvec); 3463 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3464 3465 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3466 3467 /* try to batch at most 1+MIN_LRU_BATCH+1 entries */ 3468 if (*first == -1) { 3469 *first = addr; 3470 bitmap_zero(bitmap, MIN_LRU_BATCH); 3471 return; 3472 } 3473 3474 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); 3475 if (i && i <= MIN_LRU_BATCH) { 3476 __set_bit(i - 1, bitmap); 3477 return; 3478 } 3479 3480 pmd = pmd_offset(pud, *first); 3481 3482 ptl = pmd_lockptr(args->mm, pmd); 3483 if (!spin_trylock(ptl)) 3484 goto done; 3485 3486 arch_enter_lazy_mmu_mode(); 3487 3488 do { 3489 unsigned long pfn; 3490 struct folio *folio; 3491 3492 /* don't round down the first address */ 3493 addr = i ? 
(*first & PMD_MASK) + i * PMD_SIZE : *first; 3494 3495 if (!pmd_present(pmd[i])) 3496 goto next; 3497 3498 if (!pmd_trans_huge(pmd[i])) { 3499 if (!walk->force_scan && should_clear_pmd_young() && 3500 !mm_has_notifiers(args->mm)) 3501 pmdp_test_and_clear_young(vma, addr, pmd + i); 3502 goto next; 3503 } 3504 3505 pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat); 3506 if (pfn == -1) 3507 goto next; 3508 3509 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3510 if (!folio) 3511 goto next; 3512 3513 if (!pmdp_clear_young_notify(vma, addr, pmd + i)) 3514 goto next; 3515 3516 walk->mm_stats[MM_LEAF_YOUNG]++; 3517 3518 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) && 3519 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3520 !folio_test_swapcache(folio))) 3521 folio_mark_dirty(folio); 3522 3523 old_gen = folio_update_gen(folio, new_gen); 3524 if (old_gen >= 0 && old_gen != new_gen) 3525 update_batch_size(walk, folio, old_gen, new_gen); 3526 next: 3527 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1; 3528 } while (i <= MIN_LRU_BATCH); 3529 3530 arch_leave_lazy_mmu_mode(); 3531 spin_unlock(ptl); 3532 done: 3533 *first = -1; 3534 } 3535 3536 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, 3537 struct mm_walk *args) 3538 { 3539 int i; 3540 pmd_t *pmd; 3541 unsigned long next; 3542 unsigned long addr; 3543 struct vm_area_struct *vma; 3544 DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); 3545 unsigned long first = -1; 3546 struct lru_gen_mm_walk *walk = args->private; 3547 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 3548 3549 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3550 3551 /* 3552 * Finish an entire PMD in two passes: the first only reaches to PTE 3553 * tables to avoid taking the PMD lock; the second, if necessary, takes 3554 * the PMD lock to clear the accessed bit in PMD entries. 
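 * The second pass is batched: walk_pmd_range_locked() collects candidate PMD
 * entries in a local bitmap, relative to the first candidate address, and
 * only takes the PMD lock once roughly MIN_LRU_BATCH entries have accumulated
 * or the walk ends.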
3555 */ 3556 pmd = pmd_offset(pud, start & PUD_MASK); 3557 restart: 3558 /* walk_pte_range() may call get_next_vma() */ 3559 vma = args->vma; 3560 for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) { 3561 pmd_t val = pmdp_get_lockless(pmd + i); 3562 3563 next = pmd_addr_end(addr, end); 3564 3565 if (!pmd_present(val) || is_huge_zero_pmd(val)) { 3566 walk->mm_stats[MM_LEAF_TOTAL]++; 3567 continue; 3568 } 3569 3570 if (pmd_trans_huge(val)) { 3571 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3572 unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat); 3573 3574 walk->mm_stats[MM_LEAF_TOTAL]++; 3575 3576 if (pfn != -1) 3577 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3578 continue; 3579 } 3580 3581 if (!walk->force_scan && should_clear_pmd_young() && 3582 !mm_has_notifiers(args->mm)) { 3583 if (!pmd_young(val)) 3584 continue; 3585 3586 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3587 } 3588 3589 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i)) 3590 continue; 3591 3592 walk->mm_stats[MM_NONLEAF_FOUND]++; 3593 3594 if (!walk_pte_range(&val, addr, next, args)) 3595 continue; 3596 3597 walk->mm_stats[MM_NONLEAF_ADDED]++; 3598 3599 /* carry over to the next generation */ 3600 update_bloom_filter(mm_state, walk->seq + 1, pmd + i); 3601 } 3602 3603 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); 3604 3605 if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end)) 3606 goto restart; 3607 } 3608 3609 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, 3610 struct mm_walk *args) 3611 { 3612 int i; 3613 pud_t *pud; 3614 unsigned long addr; 3615 unsigned long next; 3616 struct lru_gen_mm_walk *walk = args->private; 3617 3618 VM_WARN_ON_ONCE(p4d_leaf(*p4d)); 3619 3620 pud = pud_offset(p4d, start & P4D_MASK); 3621 restart: 3622 for (i = pud_index(start), addr = start; addr != end; i++, addr = next) { 3623 pud_t val = READ_ONCE(pud[i]); 3624 3625 next = pud_addr_end(addr, end); 3626 3627 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val))) 3628 continue; 3629 3630 walk_pmd_range(&val, addr, next, args); 3631 3632 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { 3633 end = (addr | ~PUD_MASK) + 1; 3634 goto done; 3635 } 3636 } 3637 3638 if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end)) 3639 goto restart; 3640 3641 end = round_up(end, P4D_SIZE); 3642 done: 3643 if (!end || !args->vma) 3644 return 1; 3645 3646 walk->next_addr = max(end, args->vma->vm_start); 3647 3648 return -EAGAIN; 3649 } 3650 3651 static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) 3652 { 3653 static const struct mm_walk_ops mm_walk_ops = { 3654 .test_walk = should_skip_vma, 3655 .p4d_entry = walk_pud_range, 3656 .walk_lock = PGWALK_RDLOCK, 3657 }; 3658 int err; 3659 struct lruvec *lruvec = walk->lruvec; 3660 3661 walk->next_addr = FIRST_USER_ADDRESS; 3662 3663 do { 3664 DEFINE_MAX_SEQ(lruvec); 3665 3666 err = -EBUSY; 3667 3668 /* another thread might have called inc_max_seq() */ 3669 if (walk->seq != max_seq) 3670 break; 3671 3672 /* the caller might be holding the lock for write */ 3673 if (mmap_read_trylock(mm)) { 3674 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); 3675 3676 mmap_read_unlock(mm); 3677 } 3678 3679 if (walk->batched) { 3680 spin_lock_irq(&lruvec->lru_lock); 3681 reset_batch_size(walk); 3682 spin_unlock_irq(&lruvec->lru_lock); 3683 } 3684 3685 cond_resched(); 3686 } while (err == -EAGAIN); 3687 } 
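/*
 * An illustrative summary of how the pieces above fit together: the aging
 * picks mm_structs off the per-memcg list via iterate_mm_list(), hands each
 * to walk_mm(), which walks its address space through walk_pud_range() ->
 * walk_pmd_range() -> walk_pte_range(), clearing accessed bits and promoting
 * young folios to the generation of max_seq with folio_update_gen(); the
 * per-walk page counts are then flushed back to the lruvec by
 * reset_batch_size().
 */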
3688 3689 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) 3690 { 3691 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3692 3693 if (pgdat && current_is_kswapd()) { 3694 VM_WARN_ON_ONCE(walk); 3695 3696 walk = &pgdat->mm_walk; 3697 } else if (!walk && force_alloc) { 3698 VM_WARN_ON_ONCE(current_is_kswapd()); 3699 3700 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 3701 } 3702 3703 current->reclaim_state->mm_walk = walk; 3704 3705 return walk; 3706 } 3707 3708 static void clear_mm_walk(void) 3709 { 3710 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3711 3712 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); 3713 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); 3714 3715 current->reclaim_state->mm_walk = NULL; 3716 3717 if (!current_is_kswapd()) 3718 kfree(walk); 3719 } 3720 3721 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) 3722 { 3723 int zone; 3724 int remaining = MAX_LRU_BATCH; 3725 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3726 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3727 3728 if (type == LRU_GEN_ANON && !can_swap) 3729 goto done; 3730 3731 /* prevent cold/hot inversion if force_scan is true */ 3732 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3733 struct list_head *head = &lrugen->folios[old_gen][type][zone]; 3734 3735 while (!list_empty(head)) { 3736 struct folio *folio = lru_to_folio(head); 3737 3738 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 3739 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 3740 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 3741 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 3742 3743 new_gen = folio_inc_gen(lruvec, folio, false); 3744 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); 3745 3746 if (!--remaining) 3747 return false; 3748 } 3749 } 3750 done: 3751 reset_ctrl_pos(lruvec, type, true); 3752 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); 3753 3754 return true; 3755 } 3756 3757 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) 3758 { 3759 int gen, type, zone; 3760 bool success = false; 3761 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3762 DEFINE_MIN_SEQ(lruvec); 3763 3764 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3765 3766 /* find the oldest populated generation */ 3767 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3768 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { 3769 gen = lru_gen_from_seq(min_seq[type]); 3770 3771 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3772 if (!list_empty(&lrugen->folios[gen][type][zone])) 3773 goto next; 3774 } 3775 3776 min_seq[type]++; 3777 } 3778 next: 3779 ; 3780 } 3781 3782 /* see the comment on lru_gen_folio */ 3783 if (can_swap) { 3784 min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); 3785 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); 3786 } 3787 3788 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3789 if (min_seq[type] == lrugen->min_seq[type]) 3790 continue; 3791 3792 reset_ctrl_pos(lruvec, type, true); 3793 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); 3794 success = true; 3795 } 3796 3797 return success; 3798 } 3799 3800 static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3801 bool can_swap, bool force_scan) 3802 { 3803 bool success; 3804 int prev, next; 3805 int type, zone; 3806 
struct lru_gen_folio *lrugen = &lruvec->lrugen; 3807 restart: 3808 if (seq < READ_ONCE(lrugen->max_seq)) 3809 return false; 3810 3811 spin_lock_irq(&lruvec->lru_lock); 3812 3813 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3814 3815 success = seq == lrugen->max_seq; 3816 if (!success) 3817 goto unlock; 3818 3819 for (type = ANON_AND_FILE - 1; type >= 0; type--) { 3820 if (get_nr_gens(lruvec, type) != MAX_NR_GENS) 3821 continue; 3822 3823 VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); 3824 3825 if (inc_min_seq(lruvec, type, can_swap)) 3826 continue; 3827 3828 spin_unlock_irq(&lruvec->lru_lock); 3829 cond_resched(); 3830 goto restart; 3831 } 3832 3833 /* 3834 * Update the active/inactive LRU sizes for compatibility. Both sides of 3835 * the current max_seq need to be covered, since max_seq+1 can overlap 3836 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do 3837 * overlap, cold/hot inversion happens. 3838 */ 3839 prev = lru_gen_from_seq(lrugen->max_seq - 1); 3840 next = lru_gen_from_seq(lrugen->max_seq + 1); 3841 3842 for (type = 0; type < ANON_AND_FILE; type++) { 3843 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3844 enum lru_list lru = type * LRU_INACTIVE_FILE; 3845 long delta = lrugen->nr_pages[prev][type][zone] - 3846 lrugen->nr_pages[next][type][zone]; 3847 3848 if (!delta) 3849 continue; 3850 3851 __update_lru_size(lruvec, lru, zone, delta); 3852 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); 3853 } 3854 } 3855 3856 for (type = 0; type < ANON_AND_FILE; type++) 3857 reset_ctrl_pos(lruvec, type, false); 3858 3859 WRITE_ONCE(lrugen->timestamps[next], jiffies); 3860 /* make sure preceding modifications appear */ 3861 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); 3862 unlock: 3863 spin_unlock_irq(&lruvec->lru_lock); 3864 3865 return success; 3866 } 3867 3868 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3869 bool can_swap, bool force_scan) 3870 { 3871 bool success; 3872 struct lru_gen_mm_walk *walk; 3873 struct mm_struct *mm = NULL; 3874 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3875 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 3876 3877 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); 3878 3879 if (!mm_state) 3880 return inc_max_seq(lruvec, seq, can_swap, force_scan); 3881 3882 /* see the comment in iterate_mm_list() */ 3883 if (seq <= READ_ONCE(mm_state->seq)) 3884 return false; 3885 3886 /* 3887 * If the hardware doesn't automatically set the accessed bit, fallback 3888 * to lru_gen_look_around(), which only clears the accessed bit in a 3889 * handful of PTEs. Spreading the work out over a period of time usually 3890 * is less efficient, but it avoids bursty page faults. 
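 * The same fallback to iterate_mm_list_nowalk() is taken below when a
 * struct lru_gen_mm_walk cannot be allocated.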
3891 */ 3892 if (!should_walk_mmu()) { 3893 success = iterate_mm_list_nowalk(lruvec, seq); 3894 goto done; 3895 } 3896 3897 walk = set_mm_walk(NULL, true); 3898 if (!walk) { 3899 success = iterate_mm_list_nowalk(lruvec, seq); 3900 goto done; 3901 } 3902 3903 walk->lruvec = lruvec; 3904 walk->seq = seq; 3905 walk->can_swap = can_swap; 3906 walk->force_scan = force_scan; 3907 3908 do { 3909 success = iterate_mm_list(walk, &mm); 3910 if (mm) 3911 walk_mm(mm, walk); 3912 } while (mm); 3913 done: 3914 if (success) { 3915 success = inc_max_seq(lruvec, seq, can_swap, force_scan); 3916 WARN_ON_ONCE(!success); 3917 } 3918 3919 return success; 3920 } 3921 3922 /****************************************************************************** 3923 * working set protection 3924 ******************************************************************************/ 3925 3926 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) 3927 { 3928 int priority; 3929 unsigned long reclaimable; 3930 3931 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) 3932 return; 3933 /* 3934 * Determine the initial priority based on 3935 * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, 3936 * where reclaimed_to_scanned_ratio = inactive / total. 3937 */ 3938 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); 3939 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 3940 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); 3941 3942 /* round down reclaimable and round up sc->nr_to_reclaim */ 3943 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); 3944 3945 /* 3946 * The estimation is based on LRU pages only, so cap it to prevent 3947 * overshoots of shrinker objects by large margins. 3948 */ 3949 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); 3950 } 3951 3952 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) 3953 { 3954 int gen, type, zone; 3955 unsigned long total = 0; 3956 bool can_swap = get_swappiness(lruvec, sc); 3957 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3958 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3959 DEFINE_MAX_SEQ(lruvec); 3960 DEFINE_MIN_SEQ(lruvec); 3961 3962 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3963 unsigned long seq; 3964 3965 for (seq = min_seq[type]; seq <= max_seq; seq++) { 3966 gen = lru_gen_from_seq(seq); 3967 3968 for (zone = 0; zone < MAX_NR_ZONES; zone++) 3969 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 3970 } 3971 } 3972 3973 /* whether the size is big enough to be helpful */ 3974 return mem_cgroup_online(memcg) ? 
(total >> sc->priority) : total; 3975 } 3976 3977 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc, 3978 unsigned long min_ttl) 3979 { 3980 int gen; 3981 unsigned long birth; 3982 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3983 DEFINE_MIN_SEQ(lruvec); 3984 3985 if (mem_cgroup_below_min(NULL, memcg)) 3986 return false; 3987 3988 if (!lruvec_is_sizable(lruvec, sc)) 3989 return false; 3990 3991 /* see the comment on lru_gen_folio */ 3992 gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); 3993 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 3994 3995 return time_is_before_jiffies(birth + min_ttl); 3996 } 3997 3998 /* to protect the working set of the last N jiffies */ 3999 static unsigned long lru_gen_min_ttl __read_mostly; 4000 4001 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 4002 { 4003 struct mem_cgroup *memcg; 4004 unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); 4005 bool reclaimable = !min_ttl; 4006 4007 VM_WARN_ON_ONCE(!current_is_kswapd()); 4008 4009 set_initial_priority(pgdat, sc); 4010 4011 memcg = mem_cgroup_iter(NULL, NULL, NULL); 4012 do { 4013 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 4014 4015 mem_cgroup_calculate_protection(NULL, memcg); 4016 4017 if (!reclaimable) 4018 reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl); 4019 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 4020 4021 /* 4022 * The main goal is to OOM kill if every generation from all memcgs is 4023 * younger than min_ttl. However, another possibility is all memcgs are 4024 * either too small or below min. 4025 */ 4026 if (!reclaimable && mutex_trylock(&oom_lock)) { 4027 struct oom_control oc = { 4028 .gfp_mask = sc->gfp_mask, 4029 }; 4030 4031 out_of_memory(&oc); 4032 4033 mutex_unlock(&oom_lock); 4034 } 4035 } 4036 4037 /****************************************************************************** 4038 * rmap/PT walk feedback 4039 ******************************************************************************/ 4040 4041 /* 4042 * This function exploits spatial locality when shrink_folio_list() walks the 4043 * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If 4044 * the scan was done cacheline efficiently, it adds the PMD entry pointing to 4045 * the PTE table to the Bloom filter. This forms a feedback loop between the 4046 * eviction and the aging. 
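 * The lookaround below is clamped to at most MIN_LRU_BATCH pages around the
 * young PTE, so the extra work done while holding the PTL stays bounded.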
4047 */ 4048 bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) 4049 { 4050 int i; 4051 unsigned long start; 4052 unsigned long end; 4053 struct lru_gen_mm_walk *walk; 4054 int young = 1; 4055 pte_t *pte = pvmw->pte; 4056 unsigned long addr = pvmw->address; 4057 struct vm_area_struct *vma = pvmw->vma; 4058 struct folio *folio = pfn_folio(pvmw->pfn); 4059 bool can_swap = !folio_is_file_lru(folio); 4060 struct mem_cgroup *memcg = folio_memcg(folio); 4061 struct pglist_data *pgdat = folio_pgdat(folio); 4062 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 4063 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 4064 DEFINE_MAX_SEQ(lruvec); 4065 int old_gen, new_gen = lru_gen_from_seq(max_seq); 4066 4067 lockdep_assert_held(pvmw->ptl); 4068 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); 4069 4070 if (!ptep_clear_young_notify(vma, addr, pte)) 4071 return false; 4072 4073 if (spin_is_contended(pvmw->ptl)) 4074 return true; 4075 4076 /* exclude special VMAs containing anon pages from COW */ 4077 if (vma->vm_flags & VM_SPECIAL) 4078 return true; 4079 4080 /* avoid taking the LRU lock under the PTL when possible */ 4081 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; 4082 4083 start = max(addr & PMD_MASK, vma->vm_start); 4084 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; 4085 4086 if (end - start == PAGE_SIZE) 4087 return true; 4088 4089 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { 4090 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) 4091 end = start + MIN_LRU_BATCH * PAGE_SIZE; 4092 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) 4093 start = end - MIN_LRU_BATCH * PAGE_SIZE; 4094 else { 4095 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; 4096 end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2; 4097 } 4098 } 4099 4100 arch_enter_lazy_mmu_mode(); 4101 4102 pte -= (addr - start) / PAGE_SIZE; 4103 4104 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { 4105 unsigned long pfn; 4106 pte_t ptent = ptep_get(pte + i); 4107 4108 pfn = get_pte_pfn(ptent, vma, addr, pgdat); 4109 if (pfn == -1) 4110 continue; 4111 4112 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); 4113 if (!folio) 4114 continue; 4115 4116 if (!ptep_clear_young_notify(vma, addr, pte + i)) 4117 continue; 4118 4119 young++; 4120 4121 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 4122 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 4123 !folio_test_swapcache(folio))) 4124 folio_mark_dirty(folio); 4125 4126 if (walk) { 4127 old_gen = folio_update_gen(folio, new_gen); 4128 if (old_gen >= 0 && old_gen != new_gen) 4129 update_batch_size(walk, folio, old_gen, new_gen); 4130 4131 continue; 4132 } 4133 4134 old_gen = folio_lru_gen(folio); 4135 if (old_gen < 0) 4136 folio_set_referenced(folio); 4137 else if (old_gen != new_gen) { 4138 folio_clear_lru_refs(folio); 4139 folio_activate(folio); 4140 } 4141 } 4142 4143 arch_leave_lazy_mmu_mode(); 4144 4145 /* feedback from rmap walkers to page table walkers */ 4146 if (mm_state && suitable_to_scan(i, young)) 4147 update_bloom_filter(mm_state, max_seq, pvmw->pmd); 4148 4149 return true; 4150 } 4151 4152 /****************************************************************************** 4153 * memcg LRU 4154 ******************************************************************************/ 4155 4156 /* see the comment on MEMCG_NR_GENS */ 4157 enum { 4158 MEMCG_LRU_NOP, 4159 MEMCG_LRU_HEAD, 4160 MEMCG_LRU_TAIL, 4161 MEMCG_LRU_OLD, 4162 MEMCG_LRU_YOUNG, 4163 }; 4164 4165 static void lru_gen_rotate_memcg(struct lruvec *lruvec, 
int op) 4166 { 4167 int seg; 4168 int old, new; 4169 unsigned long flags; 4170 int bin = get_random_u32_below(MEMCG_NR_BINS); 4171 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4172 4173 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); 4174 4175 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); 4176 4177 seg = 0; 4178 new = old = lruvec->lrugen.gen; 4179 4180 /* see the comment on MEMCG_NR_GENS */ 4181 if (op == MEMCG_LRU_HEAD) 4182 seg = MEMCG_LRU_HEAD; 4183 else if (op == MEMCG_LRU_TAIL) 4184 seg = MEMCG_LRU_TAIL; 4185 else if (op == MEMCG_LRU_OLD) 4186 new = get_memcg_gen(pgdat->memcg_lru.seq); 4187 else if (op == MEMCG_LRU_YOUNG) 4188 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); 4189 else 4190 VM_WARN_ON_ONCE(true); 4191 4192 WRITE_ONCE(lruvec->lrugen.seg, seg); 4193 WRITE_ONCE(lruvec->lrugen.gen, new); 4194 4195 hlist_nulls_del_rcu(&lruvec->lrugen.list); 4196 4197 if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD) 4198 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4199 else 4200 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4201 4202 pgdat->memcg_lru.nr_memcgs[old]--; 4203 pgdat->memcg_lru.nr_memcgs[new]++; 4204 4205 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) 4206 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4207 4208 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); 4209 } 4210 4211 #ifdef CONFIG_MEMCG 4212 4213 void lru_gen_online_memcg(struct mem_cgroup *memcg) 4214 { 4215 int gen; 4216 int nid; 4217 int bin = get_random_u32_below(MEMCG_NR_BINS); 4218 4219 for_each_node(nid) { 4220 struct pglist_data *pgdat = NODE_DATA(nid); 4221 struct lruvec *lruvec = get_lruvec(memcg, nid); 4222 4223 spin_lock_irq(&pgdat->memcg_lru.lock); 4224 4225 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); 4226 4227 gen = get_memcg_gen(pgdat->memcg_lru.seq); 4228 4229 lruvec->lrugen.gen = gen; 4230 4231 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); 4232 pgdat->memcg_lru.nr_memcgs[gen]++; 4233 4234 spin_unlock_irq(&pgdat->memcg_lru.lock); 4235 } 4236 } 4237 4238 void lru_gen_offline_memcg(struct mem_cgroup *memcg) 4239 { 4240 int nid; 4241 4242 for_each_node(nid) { 4243 struct lruvec *lruvec = get_lruvec(memcg, nid); 4244 4245 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD); 4246 } 4247 } 4248 4249 void lru_gen_release_memcg(struct mem_cgroup *memcg) 4250 { 4251 int gen; 4252 int nid; 4253 4254 for_each_node(nid) { 4255 struct pglist_data *pgdat = NODE_DATA(nid); 4256 struct lruvec *lruvec = get_lruvec(memcg, nid); 4257 4258 spin_lock_irq(&pgdat->memcg_lru.lock); 4259 4260 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) 4261 goto unlock; 4262 4263 gen = lruvec->lrugen.gen; 4264 4265 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); 4266 pgdat->memcg_lru.nr_memcgs[gen]--; 4267 4268 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) 4269 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4270 unlock: 4271 spin_unlock_irq(&pgdat->memcg_lru.lock); 4272 } 4273 } 4274 4275 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) 4276 { 4277 struct lruvec *lruvec = get_lruvec(memcg, nid); 4278 4279 /* see the comment on MEMCG_NR_GENS */ 4280 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) 4281 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); 4282 } 4283 4284 #endif /* CONFIG_MEMCG */ 4285 4286 /****************************************************************************** 4287 
* the eviction 4288 ******************************************************************************/ 4289 4290 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, 4291 int tier_idx) 4292 { 4293 bool success; 4294 bool dirty, writeback; 4295 int gen = folio_lru_gen(folio); 4296 int type = folio_is_file_lru(folio); 4297 int zone = folio_zonenum(folio); 4298 int delta = folio_nr_pages(folio); 4299 int refs = folio_lru_refs(folio); 4300 int tier = lru_tier_from_refs(refs); 4301 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4302 4303 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); 4304 4305 /* unevictable */ 4306 if (!folio_evictable(folio)) { 4307 success = lru_gen_del_folio(lruvec, folio, true); 4308 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4309 folio_set_unevictable(folio); 4310 lruvec_add_folio(lruvec, folio); 4311 __count_vm_events(UNEVICTABLE_PGCULLED, delta); 4312 return true; 4313 } 4314 4315 /* promoted */ 4316 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { 4317 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4318 return true; 4319 } 4320 4321 /* protected */ 4322 if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) { 4323 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 4324 4325 gen = folio_inc_gen(lruvec, folio, false); 4326 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4327 4328 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 4329 lrugen->protected[hist][type][tier - 1] + delta); 4330 return true; 4331 } 4332 4333 /* ineligible */ 4334 if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { 4335 gen = folio_inc_gen(lruvec, folio, false); 4336 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4337 return true; 4338 } 4339 4340 dirty = folio_test_dirty(folio); 4341 writeback = folio_test_writeback(folio); 4342 if (type == LRU_GEN_FILE && dirty) { 4343 sc->nr.file_taken += delta; 4344 if (!writeback) 4345 sc->nr.unqueued_dirty += delta; 4346 } 4347 4348 /* waiting for writeback */ 4349 if (folio_test_locked(folio) || writeback || 4350 (type == LRU_GEN_FILE && dirty)) { 4351 gen = folio_inc_gen(lruvec, folio, true); 4352 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4353 return true; 4354 } 4355 4356 return false; 4357 } 4358 4359 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) 4360 { 4361 bool success; 4362 4363 /* swap constrained */ 4364 if (!(sc->gfp_mask & __GFP_IO) && 4365 (folio_test_dirty(folio) || 4366 (folio_test_anon(folio) && !folio_test_swapcache(folio)))) 4367 return false; 4368 4369 /* raced with release_pages() */ 4370 if (!folio_try_get(folio)) 4371 return false; 4372 4373 /* raced with another isolation */ 4374 if (!folio_test_clear_lru(folio)) { 4375 folio_put(folio); 4376 return false; 4377 } 4378 4379 /* see the comment on MAX_NR_TIERS */ 4380 if (!folio_test_referenced(folio)) 4381 folio_clear_lru_refs(folio); 4382 4383 /* for shrink_folio_list() */ 4384 folio_clear_reclaim(folio); 4385 folio_clear_referenced(folio); 4386 4387 success = lru_gen_del_folio(lruvec, folio, true); 4388 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4389 4390 return true; 4391 } 4392 4393 static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, 4394 int type, int tier, struct list_head *list) 4395 { 4396 int i; 4397 int gen; 4398 enum vm_event_item item; 4399 int sorted = 0; 4400 int scanned = 0; 4401 int isolated = 0; 4402 int skipped = 0; 4403 int remaining = MAX_LRU_BATCH; 4404 struct lru_gen_folio *lrugen = &lruvec->lrugen; 
4405 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4406 4407 VM_WARN_ON_ONCE(!list_empty(list)); 4408 4409 if (get_nr_gens(lruvec, type) == MIN_NR_GENS) 4410 return 0; 4411 4412 gen = lru_gen_from_seq(lrugen->min_seq[type]); 4413 4414 for (i = MAX_NR_ZONES; i > 0; i--) { 4415 LIST_HEAD(moved); 4416 int skipped_zone = 0; 4417 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; 4418 struct list_head *head = &lrugen->folios[gen][type][zone]; 4419 4420 while (!list_empty(head)) { 4421 struct folio *folio = lru_to_folio(head); 4422 int delta = folio_nr_pages(folio); 4423 4424 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 4425 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 4426 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 4427 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 4428 4429 scanned += delta; 4430 4431 if (sort_folio(lruvec, folio, sc, tier)) 4432 sorted += delta; 4433 else if (isolate_folio(lruvec, folio, sc)) { 4434 list_add(&folio->lru, list); 4435 isolated += delta; 4436 } else { 4437 list_move(&folio->lru, &moved); 4438 skipped_zone += delta; 4439 } 4440 4441 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) 4442 break; 4443 } 4444 4445 if (skipped_zone) { 4446 list_splice(&moved, head); 4447 __count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone); 4448 skipped += skipped_zone; 4449 } 4450 4451 if (!remaining || isolated >= MIN_LRU_BATCH) 4452 break; 4453 } 4454 4455 item = PGSCAN_KSWAPD + reclaimer_offset(); 4456 if (!cgroup_reclaim(sc)) { 4457 __count_vm_events(item, isolated); 4458 __count_vm_events(PGREFILL, sorted); 4459 } 4460 __count_memcg_events(memcg, item, isolated); 4461 __count_memcg_events(memcg, PGREFILL, sorted); 4462 __count_vm_events(PGSCAN_ANON + type, isolated); 4463 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, 4464 scanned, skipped, isolated, 4465 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4466 if (type == LRU_GEN_FILE) 4467 sc->nr.file_taken += isolated; 4468 /* 4469 * There might not be eligible folios due to reclaim_idx. Check the 4470 * remaining to prevent livelock if it's not making progress. 4471 */ 4472 return isolated || !remaining ? scanned : 0; 4473 } 4474 4475 static int get_tier_idx(struct lruvec *lruvec, int type) 4476 { 4477 int tier; 4478 struct ctrl_pos sp, pv; 4479 4480 /* 4481 * To leave a margin for fluctuations, use a larger gain factor (1:2). 4482 * This value is chosen because any other tier would have at least twice 4483 * as many refaults as the first tier. 4484 */ 4485 read_ctrl_pos(lruvec, type, 0, 1, &sp); 4486 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4487 read_ctrl_pos(lruvec, type, tier, 2, &pv); 4488 if (!positive_ctrl_err(&sp, &pv)) 4489 break; 4490 } 4491 4492 return tier - 1; 4493 } 4494 4495 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) 4496 { 4497 int type, tier; 4498 struct ctrl_pos sp, pv; 4499 int gain[ANON_AND_FILE] = { swappiness, MAX_SWAPPINESS - swappiness }; 4500 4501 /* 4502 * Compare the first tier of anon with that of file to determine which 4503 * type to scan. Also need to compare other tiers of the selected type 4504 * with the first tier of the other type to determine the last tier (of 4505 * the selected type) to evict. 
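 * The gain[] pair folds swappiness into these comparisons, so the anon/file
 * choice is weighted by the configured swappiness rather than by raw refault
 * counts alone.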
4506 */ 4507 read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); 4508 read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); 4509 type = positive_ctrl_err(&sp, &pv); 4510 4511 read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); 4512 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4513 read_ctrl_pos(lruvec, type, tier, gain[type], &pv); 4514 if (!positive_ctrl_err(&sp, &pv)) 4515 break; 4516 } 4517 4518 *tier_idx = tier - 1; 4519 4520 return type; 4521 } 4522 4523 static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, 4524 int *type_scanned, struct list_head *list) 4525 { 4526 int i; 4527 int type; 4528 int scanned; 4529 int tier = -1; 4530 DEFINE_MIN_SEQ(lruvec); 4531 4532 /* 4533 * Try to make the obvious choice first, and if anon and file are both 4534 * available from the same generation, 4535 * 1. Interpret swappiness 1 as file first and MAX_SWAPPINESS as anon 4536 * first. 4537 * 2. If !__GFP_IO, file first since clean pagecache is more likely to 4538 * exist than clean swapcache. 4539 */ 4540 if (!swappiness) 4541 type = LRU_GEN_FILE; 4542 else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) 4543 type = LRU_GEN_ANON; 4544 else if (swappiness == 1) 4545 type = LRU_GEN_FILE; 4546 else if (swappiness == MAX_SWAPPINESS) 4547 type = LRU_GEN_ANON; 4548 else if (!(sc->gfp_mask & __GFP_IO)) 4549 type = LRU_GEN_FILE; 4550 else 4551 type = get_type_to_scan(lruvec, swappiness, &tier); 4552 4553 for (i = !swappiness; i < ANON_AND_FILE; i++) { 4554 if (tier < 0) 4555 tier = get_tier_idx(lruvec, type); 4556 4557 scanned = scan_folios(lruvec, sc, type, tier, list); 4558 if (scanned) 4559 break; 4560 4561 type = !type; 4562 tier = -1; 4563 } 4564 4565 *type_scanned = type; 4566 4567 return scanned; 4568 } 4569 4570 static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness) 4571 { 4572 int type; 4573 int scanned; 4574 int reclaimed; 4575 LIST_HEAD(list); 4576 LIST_HEAD(clean); 4577 struct folio *folio; 4578 struct folio *next; 4579 enum vm_event_item item; 4580 struct reclaim_stat stat; 4581 struct lru_gen_mm_walk *walk; 4582 bool skip_retry = false; 4583 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4584 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4585 4586 spin_lock_irq(&lruvec->lru_lock); 4587 4588 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list); 4589 4590 scanned += try_to_inc_min_seq(lruvec, swappiness); 4591 4592 if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) 4593 scanned = 0; 4594 4595 spin_unlock_irq(&lruvec->lru_lock); 4596 4597 if (list_empty(&list)) 4598 return scanned; 4599 retry: 4600 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false); 4601 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 4602 sc->nr_reclaimed += reclaimed; 4603 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 4604 scanned, reclaimed, &stat, sc->priority, 4605 type ? 
LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4606 4607 list_for_each_entry_safe_reverse(folio, next, &list, lru) { 4608 if (!folio_evictable(folio)) { 4609 list_del(&folio->lru); 4610 folio_putback_lru(folio); 4611 continue; 4612 } 4613 4614 if (folio_test_reclaim(folio) && 4615 (folio_test_dirty(folio) || folio_test_writeback(folio))) { 4616 /* restore LRU_REFS_FLAGS cleared by isolate_folio() */ 4617 if (folio_test_workingset(folio)) 4618 folio_set_referenced(folio); 4619 continue; 4620 } 4621 4622 if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || 4623 folio_mapped(folio) || folio_test_locked(folio) || 4624 folio_test_dirty(folio) || folio_test_writeback(folio)) { 4625 /* don't add rejected folios to the oldest generation */ 4626 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 4627 BIT(PG_active)); 4628 continue; 4629 } 4630 4631 /* retry folios that may have missed folio_rotate_reclaimable() */ 4632 list_move(&folio->lru, &clean); 4633 } 4634 4635 spin_lock_irq(&lruvec->lru_lock); 4636 4637 move_folios_to_lru(lruvec, &list); 4638 4639 walk = current->reclaim_state->mm_walk; 4640 if (walk && walk->batched) { 4641 walk->lruvec = lruvec; 4642 reset_batch_size(walk); 4643 } 4644 4645 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), 4646 stat.nr_demoted); 4647 4648 item = PGSTEAL_KSWAPD + reclaimer_offset(); 4649 if (!cgroup_reclaim(sc)) 4650 __count_vm_events(item, reclaimed); 4651 __count_memcg_events(memcg, item, reclaimed); 4652 __count_vm_events(PGSTEAL_ANON + type, reclaimed); 4653 4654 spin_unlock_irq(&lruvec->lru_lock); 4655 4656 list_splice_init(&clean, &list); 4657 4658 if (!list_empty(&list)) { 4659 skip_retry = true; 4660 goto retry; 4661 } 4662 4663 return scanned; 4664 } 4665 4666 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, 4667 bool can_swap, unsigned long *nr_to_scan) 4668 { 4669 int gen, type, zone; 4670 unsigned long old = 0; 4671 unsigned long young = 0; 4672 unsigned long total = 0; 4673 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4674 DEFINE_MIN_SEQ(lruvec); 4675 4676 /* whether this lruvec is completely out of cold folios */ 4677 if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) { 4678 *nr_to_scan = 0; 4679 return true; 4680 } 4681 4682 for (type = !can_swap; type < ANON_AND_FILE; type++) { 4683 unsigned long seq; 4684 4685 for (seq = min_seq[type]; seq <= max_seq; seq++) { 4686 unsigned long size = 0; 4687 4688 gen = lru_gen_from_seq(seq); 4689 4690 for (zone = 0; zone < MAX_NR_ZONES; zone++) 4691 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 4692 4693 total += size; 4694 if (seq == max_seq) 4695 young += size; 4696 else if (seq + MIN_NR_GENS == max_seq) 4697 old += size; 4698 } 4699 } 4700 4701 *nr_to_scan = total; 4702 4703 /* 4704 * The aging tries to be lazy to reduce the overhead, while the eviction 4705 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the 4706 * ideal number of generations is MIN_NR_GENS+1. 4707 */ 4708 if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) 4709 return false; 4710 4711 /* 4712 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) 4713 * of the total number of pages for each generation. A reasonable range 4714 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The 4715 * aging cares about the upper bound of hot pages, while the eviction 4716 * cares about the lower bound of cold pages. 
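 * Concretely, the two checks below request aging when the youngest
 * generation holds more than 1/MIN_NR_GENS of the pages (too many hot
 * folios), or when the generation MIN_NR_GENS behind max_seq holds less
 * than 1/(MIN_NR_GENS + 2) of them (too few cold folios).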
4717 */ 4718 if (young * MIN_NR_GENS > total) 4719 return true; 4720 if (old * (MIN_NR_GENS + 2) < total) 4721 return true; 4722 4723 return false; 4724 } 4725 4726 /* 4727 * For future optimizations: 4728 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg 4729 * reclaim. 4730 */ 4731 static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap) 4732 { 4733 bool success; 4734 unsigned long nr_to_scan; 4735 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4736 DEFINE_MAX_SEQ(lruvec); 4737 4738 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) 4739 return -1; 4740 4741 success = should_run_aging(lruvec, max_seq, can_swap, &nr_to_scan); 4742 4743 /* try to scrape all its memory if this memcg was deleted */ 4744 if (nr_to_scan && !mem_cgroup_online(memcg)) 4745 return nr_to_scan; 4746 4747 /* try to get away with not aging at the default priority */ 4748 if (!success || sc->priority == DEF_PRIORITY) 4749 return nr_to_scan >> sc->priority; 4750 4751 /* stop scanning this lruvec as it's low on cold folios */ 4752 return try_to_inc_max_seq(lruvec, max_seq, can_swap, false) ? -1 : 0; 4753 } 4754 4755 static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc) 4756 { 4757 int i; 4758 enum zone_watermarks mark; 4759 4760 /* don't abort memcg reclaim to ensure fairness */ 4761 if (!root_reclaim(sc)) 4762 return false; 4763 4764 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) 4765 return true; 4766 4767 /* check the order to exclude compaction-induced reclaim */ 4768 if (!current_is_kswapd() || sc->order) 4769 return false; 4770 4771 mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ? 4772 WMARK_PROMO : WMARK_HIGH; 4773 4774 for (i = 0; i <= sc->reclaim_idx; i++) { 4775 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; 4776 unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH; 4777 4778 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) 4779 return false; 4780 } 4781 4782 /* kswapd should abort if all eligible zones are safe */ 4783 return true; 4784 } 4785 4786 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 4787 { 4788 long nr_to_scan; 4789 unsigned long scanned = 0; 4790 int swappiness = get_swappiness(lruvec, sc); 4791 4792 while (true) { 4793 int delta; 4794 4795 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); 4796 if (nr_to_scan <= 0) 4797 break; 4798 4799 delta = evict_folios(lruvec, sc, swappiness); 4800 if (!delta) 4801 break; 4802 4803 scanned += delta; 4804 if (scanned >= nr_to_scan) 4805 break; 4806 4807 if (should_abort_scan(lruvec, sc)) 4808 break; 4809 4810 cond_resched(); 4811 } 4812 4813 /* 4814 * If too many file cache in the coldest generation can't be evicted 4815 * due to being dirty, wake up the flusher. 
4816 */ 4817 if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken) 4818 wakeup_flusher_threads(WB_REASON_VMSCAN); 4819 4820 /* whether this lruvec should be rotated */ 4821 return nr_to_scan < 0; 4822 } 4823 4824 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) 4825 { 4826 bool success; 4827 unsigned long scanned = sc->nr_scanned; 4828 unsigned long reclaimed = sc->nr_reclaimed; 4829 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4830 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4831 4832 /* lru_gen_age_node() called mem_cgroup_calculate_protection() */ 4833 if (mem_cgroup_below_min(NULL, memcg)) 4834 return MEMCG_LRU_YOUNG; 4835 4836 if (mem_cgroup_below_low(NULL, memcg)) { 4837 /* see the comment on MEMCG_NR_GENS */ 4838 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) 4839 return MEMCG_LRU_TAIL; 4840 4841 memcg_memory_event(memcg, MEMCG_LOW); 4842 } 4843 4844 success = try_to_shrink_lruvec(lruvec, sc); 4845 4846 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); 4847 4848 if (!sc->proactive) 4849 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, 4850 sc->nr_reclaimed - reclaimed); 4851 4852 flush_reclaim_state(sc); 4853 4854 if (success && mem_cgroup_online(memcg)) 4855 return MEMCG_LRU_YOUNG; 4856 4857 if (!success && lruvec_is_sizable(lruvec, sc)) 4858 return 0; 4859 4860 /* one retry if offlined or too small */ 4861 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? 4862 MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG; 4863 } 4864 4865 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) 4866 { 4867 int op; 4868 int gen; 4869 int bin; 4870 int first_bin; 4871 struct lruvec *lruvec; 4872 struct lru_gen_folio *lrugen; 4873 struct mem_cgroup *memcg; 4874 struct hlist_nulls_node *pos; 4875 4876 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); 4877 bin = first_bin = get_random_u32_below(MEMCG_NR_BINS); 4878 restart: 4879 op = 0; 4880 memcg = NULL; 4881 4882 rcu_read_lock(); 4883 4884 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { 4885 if (op) { 4886 lru_gen_rotate_memcg(lruvec, op); 4887 op = 0; 4888 } 4889 4890 mem_cgroup_put(memcg); 4891 memcg = NULL; 4892 4893 if (gen != READ_ONCE(lrugen->gen)) 4894 continue; 4895 4896 lruvec = container_of(lrugen, struct lruvec, lrugen); 4897 memcg = lruvec_memcg(lruvec); 4898 4899 if (!mem_cgroup_tryget(memcg)) { 4900 lru_gen_release_memcg(memcg); 4901 memcg = NULL; 4902 continue; 4903 } 4904 4905 rcu_read_unlock(); 4906 4907 op = shrink_one(lruvec, sc); 4908 4909 rcu_read_lock(); 4910 4911 if (should_abort_scan(lruvec, sc)) 4912 break; 4913 } 4914 4915 rcu_read_unlock(); 4916 4917 if (op) 4918 lru_gen_rotate_memcg(lruvec, op); 4919 4920 mem_cgroup_put(memcg); 4921 4922 if (!is_a_nulls(pos)) 4923 return; 4924 4925 /* restart if raced with lru_gen_rotate_memcg() */ 4926 if (gen != get_nulls_value(pos)) 4927 goto restart; 4928 4929 /* try the rest of the bins of the current generation */ 4930 bin = get_memcg_bin(bin + 1); 4931 if (bin != first_bin) 4932 goto restart; 4933 } 4934 4935 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 4936 { 4937 struct blk_plug plug; 4938 4939 VM_WARN_ON_ONCE(root_reclaim(sc)); 4940 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); 4941 4942 lru_add_drain(); 4943 4944 blk_start_plug(&plug); 4945 4946 set_mm_walk(NULL, sc->proactive); 4947 4948 if (try_to_shrink_lruvec(lruvec, sc)) 4949 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG); 4950 4951 
clear_mm_walk(); 4952 4953 blk_finish_plug(&plug); 4954 } 4955 4956 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 4957 { 4958 struct blk_plug plug; 4959 unsigned long reclaimed = sc->nr_reclaimed; 4960 4961 VM_WARN_ON_ONCE(!root_reclaim(sc)); 4962 4963 /* 4964 * Unmapped clean folios are already prioritized. Scanning for more of 4965 * them is likely futile and can cause high reclaim latency when there 4966 * is a large number of memcgs. 4967 */ 4968 if (!sc->may_writepage || !sc->may_unmap) 4969 goto done; 4970 4971 lru_add_drain(); 4972 4973 blk_start_plug(&plug); 4974 4975 set_mm_walk(pgdat, sc->proactive); 4976 4977 set_initial_priority(pgdat, sc); 4978 4979 if (current_is_kswapd()) 4980 sc->nr_reclaimed = 0; 4981 4982 if (mem_cgroup_disabled()) 4983 shrink_one(&pgdat->__lruvec, sc); 4984 else 4985 shrink_many(pgdat, sc); 4986 4987 if (current_is_kswapd()) 4988 sc->nr_reclaimed += reclaimed; 4989 4990 clear_mm_walk(); 4991 4992 blk_finish_plug(&plug); 4993 done: 4994 if (sc->nr_reclaimed > reclaimed) 4995 pgdat->kswapd_failures = 0; 4996 } 4997 4998 /****************************************************************************** 4999 * state change 5000 ******************************************************************************/ 5001 5002 static bool __maybe_unused state_is_valid(struct lruvec *lruvec) 5003 { 5004 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5005 5006 if (lrugen->enabled) { 5007 enum lru_list lru; 5008 5009 for_each_evictable_lru(lru) { 5010 if (!list_empty(&lruvec->lists[lru])) 5011 return false; 5012 } 5013 } else { 5014 int gen, type, zone; 5015 5016 for_each_gen_type_zone(gen, type, zone) { 5017 if (!list_empty(&lrugen->folios[gen][type][zone])) 5018 return false; 5019 } 5020 } 5021 5022 return true; 5023 } 5024 5025 static bool fill_evictable(struct lruvec *lruvec) 5026 { 5027 enum lru_list lru; 5028 int remaining = MAX_LRU_BATCH; 5029 5030 for_each_evictable_lru(lru) { 5031 int type = is_file_lru(lru); 5032 bool active = is_active_lru(lru); 5033 struct list_head *head = &lruvec->lists[lru]; 5034 5035 while (!list_empty(head)) { 5036 bool success; 5037 struct folio *folio = lru_to_folio(head); 5038 5039 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 5040 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); 5041 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 5042 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); 5043 5044 lruvec_del_folio(lruvec, folio); 5045 success = lru_gen_add_folio(lruvec, folio, false); 5046 VM_WARN_ON_ONCE(!success); 5047 5048 if (!--remaining) 5049 return false; 5050 } 5051 } 5052 5053 return true; 5054 } 5055 5056 static bool drain_evictable(struct lruvec *lruvec) 5057 { 5058 int gen, type, zone; 5059 int remaining = MAX_LRU_BATCH; 5060 5061 for_each_gen_type_zone(gen, type, zone) { 5062 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; 5063 5064 while (!list_empty(head)) { 5065 bool success; 5066 struct folio *folio = lru_to_folio(head); 5067 5068 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 5069 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 5070 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 5071 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 5072 5073 success = lru_gen_del_folio(lruvec, folio, false); 5074 VM_WARN_ON_ONCE(!success); 5075 lruvec_add_folio(lruvec, folio); 5076 5077 if (!--remaining) 5078 return false; 5079 } 5080 } 5081 5082 return true; 5083 } 5084 5085 
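/*
 * lru_gen_change_state() below flips the LRU_GEN_CORE static key with cgroup,
 * CPU hotplug and memory hotplug all held off, then walks every memcg and
 * node, moving folios between the conventional active/inactive lists and the
 * multi-gen lists via fill_evictable()/drain_evictable(). Both helpers bail
 * out after MAX_LRU_BATCH folios so the LRU lock can be dropped and
 * reacquired between batches.
 */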
static void lru_gen_change_state(bool enabled) 5086 { 5087 static DEFINE_MUTEX(state_mutex); 5088 5089 struct mem_cgroup *memcg; 5090 5091 cgroup_lock(); 5092 cpus_read_lock(); 5093 get_online_mems(); 5094 mutex_lock(&state_mutex); 5095 5096 if (enabled == lru_gen_enabled()) 5097 goto unlock; 5098 5099 if (enabled) 5100 static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5101 else 5102 static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5103 5104 memcg = mem_cgroup_iter(NULL, NULL, NULL); 5105 do { 5106 int nid; 5107 5108 for_each_node(nid) { 5109 struct lruvec *lruvec = get_lruvec(memcg, nid); 5110 5111 spin_lock_irq(&lruvec->lru_lock); 5112 5113 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 5114 VM_WARN_ON_ONCE(!state_is_valid(lruvec)); 5115 5116 lruvec->lrugen.enabled = enabled; 5117 5118 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) { 5119 spin_unlock_irq(&lruvec->lru_lock); 5120 cond_resched(); 5121 spin_lock_irq(&lruvec->lru_lock); 5122 } 5123 5124 spin_unlock_irq(&lruvec->lru_lock); 5125 } 5126 5127 cond_resched(); 5128 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5129 unlock: 5130 mutex_unlock(&state_mutex); 5131 put_online_mems(); 5132 cpus_read_unlock(); 5133 cgroup_unlock(); 5134 } 5135 5136 /****************************************************************************** 5137 * sysfs interface 5138 ******************************************************************************/ 5139 5140 static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5141 { 5142 return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); 5143 } 5144 5145 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5146 static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, 5147 const char *buf, size_t len) 5148 { 5149 unsigned int msecs; 5150 5151 if (kstrtouint(buf, 0, &msecs)) 5152 return -EINVAL; 5153 5154 WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs)); 5155 5156 return len; 5157 } 5158 5159 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); 5160 5161 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5162 { 5163 unsigned int caps = 0; 5164 5165 if (get_cap(LRU_GEN_CORE)) 5166 caps |= BIT(LRU_GEN_CORE); 5167 5168 if (should_walk_mmu()) 5169 caps |= BIT(LRU_GEN_MM_WALK); 5170 5171 if (should_clear_pmd_young()) 5172 caps |= BIT(LRU_GEN_NONLEAF_YOUNG); 5173 5174 return sysfs_emit(buf, "0x%04x\n", caps); 5175 } 5176 5177 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5178 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, 5179 const char *buf, size_t len) 5180 { 5181 int i; 5182 unsigned int caps; 5183 5184 if (tolower(*buf) == 'n') 5185 caps = 0; 5186 else if (tolower(*buf) == 'y') 5187 caps = -1; 5188 else if (kstrtouint(buf, 0, &caps)) 5189 return -EINVAL; 5190 5191 for (i = 0; i < NR_LRU_GEN_CAPS; i++) { 5192 bool enabled = caps & BIT(i); 5193 5194 if (i == LRU_GEN_CORE) 5195 lru_gen_change_state(enabled); 5196 else if (enabled) 5197 static_branch_enable(&lru_gen_caps[i]); 5198 else 5199 static_branch_disable(&lru_gen_caps[i]); 5200 } 5201 5202 return len; 5203 } 5204 5205 static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); 5206 5207 static struct attribute *lru_gen_attrs[] = { 5208 &lru_gen_min_ttl_attr.attr, 5209 &lru_gen_enabled_attr.attr, 5210 NULL 5211 }; 5212 5213 static const struct attribute_group lru_gen_attr_group = { 
5214 .name = "lru_gen", 5215 .attrs = lru_gen_attrs, 5216 }; 5217 5218 /****************************************************************************** 5219 * debugfs interface 5220 ******************************************************************************/ 5221 5222 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos) 5223 { 5224 struct mem_cgroup *memcg; 5225 loff_t nr_to_skip = *pos; 5226 5227 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); 5228 if (!m->private) 5229 return ERR_PTR(-ENOMEM); 5230 5231 memcg = mem_cgroup_iter(NULL, NULL, NULL); 5232 do { 5233 int nid; 5234 5235 for_each_node_state(nid, N_MEMORY) { 5236 if (!nr_to_skip--) 5237 return get_lruvec(memcg, nid); 5238 } 5239 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5240 5241 return NULL; 5242 } 5243 5244 static void lru_gen_seq_stop(struct seq_file *m, void *v) 5245 { 5246 if (!IS_ERR_OR_NULL(v)) 5247 mem_cgroup_iter_break(NULL, lruvec_memcg(v)); 5248 5249 kvfree(m->private); 5250 m->private = NULL; 5251 } 5252 5253 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos) 5254 { 5255 int nid = lruvec_pgdat(v)->node_id; 5256 struct mem_cgroup *memcg = lruvec_memcg(v); 5257 5258 ++*pos; 5259 5260 nid = next_memory_node(nid); 5261 if (nid == MAX_NUMNODES) { 5262 memcg = mem_cgroup_iter(NULL, memcg, NULL); 5263 if (!memcg) 5264 return NULL; 5265 5266 nid = first_memory_node; 5267 } 5268 5269 return get_lruvec(memcg, nid); 5270 } 5271 5272 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, 5273 unsigned long max_seq, unsigned long *min_seq, 5274 unsigned long seq) 5275 { 5276 int i; 5277 int type, tier; 5278 int hist = lru_hist_from_seq(seq); 5279 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5280 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5281 5282 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 5283 seq_printf(m, " %10d", tier); 5284 for (type = 0; type < ANON_AND_FILE; type++) { 5285 const char *s = "xxx"; 5286 unsigned long n[3] = {}; 5287 5288 if (seq == max_seq) { 5289 s = "RTx"; 5290 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); 5291 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); 5292 } else if (seq == min_seq[type] || NR_HIST_GENS > 1) { 5293 s = "rep"; 5294 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); 5295 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); 5296 if (tier) 5297 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); 5298 } 5299 5300 for (i = 0; i < 3; i++) 5301 seq_printf(m, " %10lu%c", n[i], s[i]); 5302 } 5303 seq_putc(m, '\n'); 5304 } 5305 5306 if (!mm_state) 5307 return; 5308 5309 seq_puts(m, " "); 5310 for (i = 0; i < NR_MM_STATS; i++) { 5311 const char *s = "xxxx"; 5312 unsigned long n = 0; 5313 5314 if (seq == max_seq && NR_HIST_GENS == 1) { 5315 s = "TYFA"; 5316 n = READ_ONCE(mm_state->stats[hist][i]); 5317 } else if (seq != max_seq && NR_HIST_GENS > 1) { 5318 s = "tyfa"; 5319 n = READ_ONCE(mm_state->stats[hist][i]); 5320 } 5321 5322 seq_printf(m, " %10lu%c", n, s[i]); 5323 } 5324 seq_putc(m, '\n'); 5325 } 5326 5327 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5328 static int lru_gen_seq_show(struct seq_file *m, void *v) 5329 { 5330 unsigned long seq; 5331 bool full = !debugfs_real_fops(m->file)->write; 5332 struct lruvec *lruvec = v; 5333 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5334 int nid = lruvec_pgdat(lruvec)->node_id; 5335 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 5336 DEFINE_MAX_SEQ(lruvec); 5337 DEFINE_MIN_SEQ(lruvec); 5338 5339 if (nid 
== first_memory_node) { 5340 const char *path = memcg ? m->private : ""; 5341 5342 #ifdef CONFIG_MEMCG 5343 if (memcg) 5344 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); 5345 #endif 5346 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path); 5347 } 5348 5349 seq_printf(m, " node %5d\n", nid); 5350 5351 if (!full) 5352 seq = min_seq[LRU_GEN_ANON]; 5353 else if (max_seq >= MAX_NR_GENS) 5354 seq = max_seq - MAX_NR_GENS + 1; 5355 else 5356 seq = 0; 5357 5358 for (; seq <= max_seq; seq++) { 5359 int type, zone; 5360 int gen = lru_gen_from_seq(seq); 5361 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 5362 5363 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); 5364 5365 for (type = 0; type < ANON_AND_FILE; type++) { 5366 unsigned long size = 0; 5367 char mark = full && seq < min_seq[type] ? 'x' : ' '; 5368 5369 for (zone = 0; zone < MAX_NR_ZONES; zone++) 5370 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 5371 5372 seq_printf(m, " %10lu%c", size, mark); 5373 } 5374 5375 seq_putc(m, '\n'); 5376 5377 if (full) 5378 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); 5379 } 5380 5381 return 0; 5382 } 5383 5384 static const struct seq_operations lru_gen_seq_ops = { 5385 .start = lru_gen_seq_start, 5386 .stop = lru_gen_seq_stop, 5387 .next = lru_gen_seq_next, 5388 .show = lru_gen_seq_show, 5389 }; 5390 5391 static int run_aging(struct lruvec *lruvec, unsigned long seq, 5392 bool can_swap, bool force_scan) 5393 { 5394 DEFINE_MAX_SEQ(lruvec); 5395 DEFINE_MIN_SEQ(lruvec); 5396 5397 if (seq < max_seq) 5398 return 0; 5399 5400 if (seq > max_seq) 5401 return -EINVAL; 5402 5403 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) 5404 return -ERANGE; 5405 5406 try_to_inc_max_seq(lruvec, max_seq, can_swap, force_scan); 5407 5408 return 0; 5409 } 5410 5411 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, 5412 int swappiness, unsigned long nr_to_reclaim) 5413 { 5414 DEFINE_MAX_SEQ(lruvec); 5415 5416 if (seq + MIN_NR_GENS > max_seq) 5417 return -EINVAL; 5418 5419 sc->nr_reclaimed = 0; 5420 5421 while (!signal_pending(current)) { 5422 DEFINE_MIN_SEQ(lruvec); 5423 5424 if (seq < min_seq[!swappiness]) 5425 return 0; 5426 5427 if (sc->nr_reclaimed >= nr_to_reclaim) 5428 return 0; 5429 5430 if (!evict_folios(lruvec, sc, swappiness)) 5431 return 0; 5432 5433 cond_resched(); 5434 } 5435 5436 return -EINTR; 5437 } 5438 5439 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, 5440 struct scan_control *sc, int swappiness, unsigned long opt) 5441 { 5442 struct lruvec *lruvec; 5443 int err = -EINVAL; 5444 struct mem_cgroup *memcg = NULL; 5445 5446 if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY)) 5447 return -EINVAL; 5448 5449 if (!mem_cgroup_disabled()) { 5450 rcu_read_lock(); 5451 5452 memcg = mem_cgroup_from_id(memcg_id); 5453 if (!mem_cgroup_tryget(memcg)) 5454 memcg = NULL; 5455 5456 rcu_read_unlock(); 5457 5458 if (!memcg) 5459 return -EINVAL; 5460 } 5461 5462 if (memcg_id != mem_cgroup_id(memcg)) 5463 goto done; 5464 5465 lruvec = get_lruvec(memcg, nid); 5466 5467 if (swappiness < MIN_SWAPPINESS) 5468 swappiness = get_swappiness(lruvec, sc); 5469 else if (swappiness > MAX_SWAPPINESS) 5470 goto done; 5471 5472 switch (cmd) { 5473 case '+': 5474 err = run_aging(lruvec, seq, swappiness, opt); 5475 break; 5476 case '-': 5477 err = run_eviction(lruvec, seq, sc, swappiness, opt); 5478 break; 5479 } 5480 done: 5481 mem_cgroup_put(memcg); 5482 5483 return err; 
5484 } 5485 5486 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5487 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, 5488 size_t len, loff_t *pos) 5489 { 5490 void *buf; 5491 char *cur, *next; 5492 unsigned int flags; 5493 struct blk_plug plug; 5494 int err = -EINVAL; 5495 struct scan_control sc = { 5496 .may_writepage = true, 5497 .may_unmap = true, 5498 .may_swap = true, 5499 .reclaim_idx = MAX_NR_ZONES - 1, 5500 .gfp_mask = GFP_KERNEL, 5501 }; 5502 5503 buf = kvmalloc(len + 1, GFP_KERNEL); 5504 if (!buf) 5505 return -ENOMEM; 5506 5507 if (copy_from_user(buf, src, len)) { 5508 kvfree(buf); 5509 return -EFAULT; 5510 } 5511 5512 set_task_reclaim_state(current, &sc.reclaim_state); 5513 flags = memalloc_noreclaim_save(); 5514 blk_start_plug(&plug); 5515 if (!set_mm_walk(NULL, true)) { 5516 err = -ENOMEM; 5517 goto done; 5518 } 5519 5520 next = buf; 5521 next[len] = '\0'; 5522 5523 while ((cur = strsep(&next, ",;\n"))) { 5524 int n; 5525 int end; 5526 char cmd; 5527 unsigned int memcg_id; 5528 unsigned int nid; 5529 unsigned long seq; 5530 unsigned int swappiness = -1; 5531 unsigned long opt = -1; 5532 5533 cur = skip_spaces(cur); 5534 if (!*cur) 5535 continue; 5536 5537 n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid, 5538 &seq, &end, &swappiness, &end, &opt, &end); 5539 if (n < 4 || cur[end]) { 5540 err = -EINVAL; 5541 break; 5542 } 5543 5544 err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt); 5545 if (err) 5546 break; 5547 } 5548 done: 5549 clear_mm_walk(); 5550 blk_finish_plug(&plug); 5551 memalloc_noreclaim_restore(flags); 5552 set_task_reclaim_state(current, NULL); 5553 5554 kvfree(buf); 5555 5556 return err ? : len; 5557 } 5558 5559 static int lru_gen_seq_open(struct inode *inode, struct file *file) 5560 { 5561 return seq_open(file, &lru_gen_seq_ops); 5562 } 5563 5564 static const struct file_operations lru_gen_rw_fops = { 5565 .open = lru_gen_seq_open, 5566 .read = seq_read, 5567 .write = lru_gen_seq_write, 5568 .llseek = seq_lseek, 5569 .release = seq_release, 5570 }; 5571 5572 static const struct file_operations lru_gen_ro_fops = { 5573 .open = lru_gen_seq_open, 5574 .read = seq_read, 5575 .llseek = seq_lseek, 5576 .release = seq_release, 5577 }; 5578 5579 /****************************************************************************** 5580 * initialization 5581 ******************************************************************************/ 5582 5583 void lru_gen_init_pgdat(struct pglist_data *pgdat) 5584 { 5585 int i, j; 5586 5587 spin_lock_init(&pgdat->memcg_lru.lock); 5588 5589 for (i = 0; i < MEMCG_NR_GENS; i++) { 5590 for (j = 0; j < MEMCG_NR_BINS; j++) 5591 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); 5592 } 5593 } 5594 5595 void lru_gen_init_lruvec(struct lruvec *lruvec) 5596 { 5597 int i; 5598 int gen, type, zone; 5599 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5600 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5601 5602 lrugen->max_seq = MIN_NR_GENS + 1; 5603 lrugen->enabled = lru_gen_enabled(); 5604 5605 for (i = 0; i <= MIN_NR_GENS + 1; i++) 5606 lrugen->timestamps[i] = jiffies; 5607 5608 for_each_gen_type_zone(gen, type, zone) 5609 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); 5610 5611 if (mm_state) 5612 mm_state->seq = MIN_NR_GENS; 5613 } 5614 5615 #ifdef CONFIG_MEMCG 5616 5617 void lru_gen_init_memcg(struct mem_cgroup *memcg) 5618 { 5619 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5620 5621 if (!mm_list) 5622 return; 5623 5624 
INIT_LIST_HEAD(&mm_list->fifo); 5625 spin_lock_init(&mm_list->lock); 5626 } 5627 5628 void lru_gen_exit_memcg(struct mem_cgroup *memcg) 5629 { 5630 int i; 5631 int nid; 5632 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5633 5634 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); 5635 5636 for_each_node(nid) { 5637 struct lruvec *lruvec = get_lruvec(memcg, nid); 5638 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5639 5640 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, 5641 sizeof(lruvec->lrugen.nr_pages))); 5642 5643 lruvec->lrugen.list.next = LIST_POISON1; 5644 5645 if (!mm_state) 5646 continue; 5647 5648 for (i = 0; i < NR_BLOOM_FILTERS; i++) { 5649 bitmap_free(mm_state->filters[i]); 5650 mm_state->filters[i] = NULL; 5651 } 5652 } 5653 } 5654 5655 #endif /* CONFIG_MEMCG */ 5656 5657 static int __init init_lru_gen(void) 5658 { 5659 BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); 5660 BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); 5661 5662 if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) 5663 pr_err("lru_gen: failed to create sysfs group\n"); 5664 5665 debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops); 5666 debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops); 5667 5668 return 0; 5669 }; 5670 late_initcall(init_lru_gen); 5671 5672 #else /* !CONFIG_LRU_GEN */ 5673 5674 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 5675 { 5676 BUILD_BUG(); 5677 } 5678 5679 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5680 { 5681 BUILD_BUG(); 5682 } 5683 5684 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 5685 { 5686 BUILD_BUG(); 5687 } 5688 5689 #endif /* CONFIG_LRU_GEN */ 5690 5691 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5692 { 5693 unsigned long nr[NR_LRU_LISTS]; 5694 unsigned long targets[NR_LRU_LISTS]; 5695 unsigned long nr_to_scan; 5696 enum lru_list lru; 5697 unsigned long nr_reclaimed = 0; 5698 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 5699 bool proportional_reclaim; 5700 struct blk_plug plug; 5701 5702 if (lru_gen_enabled() && !root_reclaim(sc)) { 5703 lru_gen_shrink_lruvec(lruvec, sc); 5704 return; 5705 } 5706 5707 get_scan_count(lruvec, sc, nr); 5708 5709 /* Record the original scan target for proportional adjustments later */ 5710 memcpy(targets, nr, sizeof(nr)); 5711 5712 /* 5713 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 5714 * event that can occur when there is little memory pressure e.g. 5715 * multiple streaming readers/writers. Hence, we do not abort scanning 5716 * when the requested number of pages are reclaimed when scanning at 5717 * DEF_PRIORITY on the assumption that the fact we are direct 5718 * reclaiming implies that kswapd is not keeping up and it is best to 5719 * do a batch of work at once. For memcg reclaim one check is made to 5720 * abort proportional reclaim if either the file or anon lru has already 5721 * dropped to zero at the first pass. 
5722 */ 5723 proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && 5724 sc->priority == DEF_PRIORITY); 5725 5726 blk_start_plug(&plug); 5727 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 5728 nr[LRU_INACTIVE_FILE]) { 5729 unsigned long nr_anon, nr_file, percentage; 5730 unsigned long nr_scanned; 5731 5732 for_each_evictable_lru(lru) { 5733 if (nr[lru]) { 5734 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 5735 nr[lru] -= nr_to_scan; 5736 5737 nr_reclaimed += shrink_list(lru, nr_to_scan, 5738 lruvec, sc); 5739 } 5740 } 5741 5742 cond_resched(); 5743 5744 if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) 5745 continue; 5746 5747 /* 5748 * For kswapd and memcg, reclaim at least the number of pages 5749 * requested. Ensure that the anon and file LRUs are scanned 5750 * proportionally what was requested by get_scan_count(). We 5751 * stop reclaiming one LRU and reduce the amount scanning 5752 * proportional to the original scan target. 5753 */ 5754 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 5755 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 5756 5757 /* 5758 * It's just vindictive to attack the larger once the smaller 5759 * has gone to zero. And given the way we stop scanning the 5760 * smaller below, this makes sure that we only make one nudge 5761 * towards proportionality once we've got nr_to_reclaim. 5762 */ 5763 if (!nr_file || !nr_anon) 5764 break; 5765 5766 if (nr_file > nr_anon) { 5767 unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 5768 targets[LRU_ACTIVE_ANON] + 1; 5769 lru = LRU_BASE; 5770 percentage = nr_anon * 100 / scan_target; 5771 } else { 5772 unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 5773 targets[LRU_ACTIVE_FILE] + 1; 5774 lru = LRU_FILE; 5775 percentage = nr_file * 100 / scan_target; 5776 } 5777 5778 /* Stop scanning the smaller of the LRU */ 5779 nr[lru] = 0; 5780 nr[lru + LRU_ACTIVE] = 0; 5781 5782 /* 5783 * Recalculate the other LRU scan count based on its original 5784 * scan target and the percentage scanning already complete 5785 */ 5786 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; 5787 nr_scanned = targets[lru] - nr[lru]; 5788 nr[lru] = targets[lru] * (100 - percentage) / 100; 5789 nr[lru] -= min(nr[lru], nr_scanned); 5790 5791 lru += LRU_ACTIVE; 5792 nr_scanned = targets[lru] - nr[lru]; 5793 nr[lru] = targets[lru] * (100 - percentage) / 100; 5794 nr[lru] -= min(nr[lru], nr_scanned); 5795 } 5796 blk_finish_plug(&plug); 5797 sc->nr_reclaimed += nr_reclaimed; 5798 5799 /* 5800 * Even if we did not try to evict anon pages at all, we want to 5801 * rebalance the anon lru active/inactive ratio. 5802 */ 5803 if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) && 5804 inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 5805 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 5806 sc, LRU_ACTIVE_ANON); 5807 } 5808 5809 /* Use reclaim/compaction for costly allocs or under memory pressure */ 5810 static bool in_reclaim_compaction(struct scan_control *sc) 5811 { 5812 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && 5813 (sc->order > PAGE_ALLOC_COSTLY_ORDER || 5814 sc->priority < DEF_PRIORITY - 2)) 5815 return true; 5816 5817 return false; 5818 } 5819 5820 /* 5821 * Reclaim/compaction is used for high-order allocation requests. It reclaims 5822 * order-0 pages before compacting the zone. should_continue_reclaim() returns 5823 * true if more pages should be reclaimed such that when the page allocator 5824 * calls try_to_compact_pages() that it will have enough free pages to succeed. 
5825 * It will give up earlier than that if there is difficulty reclaiming pages. 5826 */ 5827 static inline bool should_continue_reclaim(struct pglist_data *pgdat, 5828 unsigned long nr_reclaimed, 5829 struct scan_control *sc) 5830 { 5831 unsigned long pages_for_compaction; 5832 unsigned long inactive_lru_pages; 5833 int z; 5834 5835 /* If not in reclaim/compaction mode, stop */ 5836 if (!in_reclaim_compaction(sc)) 5837 return false; 5838 5839 /* 5840 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX 5841 * number of pages that were scanned. This will return to the caller 5842 * with the risk reclaim/compaction and the resulting allocation attempt 5843 * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL 5844 * allocations through requiring that the full LRU list has been scanned 5845 * first, by assuming that zero delta of sc->nr_scanned means full LRU 5846 * scan, but that approximation was wrong, and there were corner cases 5847 * where always a non-zero amount of pages were scanned. 5848 */ 5849 if (!nr_reclaimed) 5850 return false; 5851 5852 /* If compaction would go ahead or the allocation would succeed, stop */ 5853 for (z = 0; z <= sc->reclaim_idx; z++) { 5854 struct zone *zone = &pgdat->node_zones[z]; 5855 if (!managed_zone(zone)) 5856 continue; 5857 5858 /* Allocation can already succeed, nothing to do */ 5859 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), 5860 sc->reclaim_idx, 0)) 5861 return false; 5862 5863 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) 5864 return false; 5865 } 5866 5867 /* 5868 * If we have not reclaimed enough pages for compaction and the 5869 * inactive lists are large enough, continue reclaiming 5870 */ 5871 pages_for_compaction = compact_gap(sc->order); 5872 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 5873 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 5874 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 5875 5876 return inactive_lru_pages > pages_for_compaction; 5877 } 5878 5879 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) 5880 { 5881 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; 5882 struct mem_cgroup_reclaim_cookie reclaim = { 5883 .pgdat = pgdat, 5884 }; 5885 struct mem_cgroup_reclaim_cookie *partial = &reclaim; 5886 struct mem_cgroup *memcg; 5887 5888 /* 5889 * In most cases, direct reclaimers can do partial walks 5890 * through the cgroup tree, using an iterator state that 5891 * persists across invocations. This strikes a balance between 5892 * fairness and allocation latency. 5893 * 5894 * For kswapd, reliable forward progress is more important 5895 * than a quick return to idle. Always do full walks. 5896 */ 5897 if (current_is_kswapd() || sc->memcg_full_walk) 5898 partial = NULL; 5899 5900 memcg = mem_cgroup_iter(target_memcg, NULL, partial); 5901 do { 5902 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 5903 unsigned long reclaimed; 5904 unsigned long scanned; 5905 5906 /* 5907 * This loop can become CPU-bound when target memcgs 5908 * aren't eligible for reclaim - either because they 5909 * don't have any reclaimable pages, or because their 5910 * memory is explicitly protected. Avoid soft lockups. 5911 */ 5912 cond_resched(); 5913 5914 mem_cgroup_calculate_protection(target_memcg, memcg); 5915 5916 if (mem_cgroup_below_min(target_memcg, memcg)) { 5917 /* 5918 * Hard protection. 5919 * If there is no reclaimable memory, OOM. 
5920 */ 5921 continue; 5922 } else if (mem_cgroup_below_low(target_memcg, memcg)) { 5923 /* 5924 * Soft protection. 5925 * Respect the protection only as long as 5926 * there is an unprotected supply 5927 * of reclaimable memory from other cgroups. 5928 */ 5929 if (!sc->memcg_low_reclaim) { 5930 sc->memcg_low_skipped = 1; 5931 continue; 5932 } 5933 memcg_memory_event(memcg, MEMCG_LOW); 5934 } 5935 5936 reclaimed = sc->nr_reclaimed; 5937 scanned = sc->nr_scanned; 5938 5939 shrink_lruvec(lruvec, sc); 5940 5941 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, 5942 sc->priority); 5943 5944 /* Record the group's reclaim efficiency */ 5945 if (!sc->proactive) 5946 vmpressure(sc->gfp_mask, memcg, false, 5947 sc->nr_scanned - scanned, 5948 sc->nr_reclaimed - reclaimed); 5949 5950 /* If partial walks are allowed, bail once goal is reached */ 5951 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) { 5952 mem_cgroup_iter_break(target_memcg, memcg); 5953 break; 5954 } 5955 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, partial))); 5956 } 5957 5958 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) 5959 { 5960 unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed; 5961 struct lruvec *target_lruvec; 5962 bool reclaimable = false; 5963 5964 if (lru_gen_enabled() && root_reclaim(sc)) { 5965 memset(&sc->nr, 0, sizeof(sc->nr)); 5966 lru_gen_shrink_node(pgdat, sc); 5967 return; 5968 } 5969 5970 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 5971 5972 again: 5973 memset(&sc->nr, 0, sizeof(sc->nr)); 5974 5975 nr_reclaimed = sc->nr_reclaimed; 5976 nr_scanned = sc->nr_scanned; 5977 5978 prepare_scan_control(pgdat, sc); 5979 5980 shrink_node_memcgs(pgdat, sc); 5981 5982 flush_reclaim_state(sc); 5983 5984 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; 5985 5986 /* Record the subtree's reclaim efficiency */ 5987 if (!sc->proactive) 5988 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 5989 sc->nr_scanned - nr_scanned, nr_node_reclaimed); 5990 5991 if (nr_node_reclaimed) 5992 reclaimable = true; 5993 5994 if (current_is_kswapd()) { 5995 /* 5996 * If reclaim is isolating dirty pages under writeback, 5997 * it implies that the long-lived page allocation rate 5998 * is exceeding the page laundering rate. Either the 5999 * global limits are not being effective at throttling 6000 * processes due to the page distribution throughout 6001 * zones or there is heavy usage of a slow backing 6002 * device. The only option is to throttle from reclaim 6003 * context which is not ideal as there is no guarantee 6004 * the dirtying process is throttled in the same way 6005 * balance_dirty_pages() manages. 6006 * 6007 * Once a node is flagged PGDAT_WRITEBACK, kswapd will 6008 * count the number of pages under pages flagged for 6009 * immediate reclaim and stall if any are encountered 6010 * in the nr_immediate check below. 6011 */ 6012 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) 6013 set_bit(PGDAT_WRITEBACK, &pgdat->flags); 6014 6015 /* Allow kswapd to start writing pages during reclaim.*/ 6016 if (sc->nr.unqueued_dirty && 6017 sc->nr.unqueued_dirty == sc->nr.file_taken) 6018 set_bit(PGDAT_DIRTY, &pgdat->flags); 6019 6020 /* 6021 * If kswapd scans pages marked for immediate 6022 * reclaim and under writeback (nr_immediate), it 6023 * implies that pages are cycling through the LRU 6024 * faster than they are written so forcibly stall 6025 * until some pages complete writeback. 
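* (reclaim_throttle() puts kswapd to sleep on pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK] until enough writeback completes or a timeout expires.)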
6026 */ 6027 if (sc->nr.immediate) 6028 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 6029 } 6030 6031 /* 6032 * Tag a node/memcg as congested if all the dirty pages were marked 6033 * for writeback and immediate reclaim (counted in nr.congested). 6034 * 6035 * Legacy memcg will stall in page writeback so avoid forcibly 6036 * stalling in reclaim_throttle(). 6037 */ 6038 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { 6039 if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) 6040 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); 6041 6042 if (current_is_kswapd()) 6043 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); 6044 } 6045 6046 /* 6047 * Stall direct reclaim for IO completions if the lruvec is 6048 * node is congested. Allow kswapd to continue until it 6049 * starts encountering unqueued dirty pages or cycling through 6050 * the LRU too quickly. 6051 */ 6052 if (!current_is_kswapd() && current_may_throttle() && 6053 !sc->hibernation_mode && 6054 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || 6055 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) 6056 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); 6057 6058 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc)) 6059 goto again; 6060 6061 /* 6062 * Kswapd gives up on balancing particular nodes after too 6063 * many failures to reclaim anything from them and goes to 6064 * sleep. On reclaim progress, reset the failure counter. A 6065 * successful direct reclaim run will revive a dormant kswapd. 6066 */ 6067 if (reclaimable) 6068 pgdat->kswapd_failures = 0; 6069 else if (sc->cache_trim_mode) 6070 sc->cache_trim_mode_failed = 1; 6071 } 6072 6073 /* 6074 * Returns true if compaction should go ahead for a costly-order request, or 6075 * the allocation would already succeed without compaction. Return false if we 6076 * should reclaim first. 6077 */ 6078 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 6079 { 6080 unsigned long watermark; 6081 6082 if (!gfp_compaction_allowed(sc->gfp_mask)) 6083 return false; 6084 6085 /* Allocation can already succeed, nothing to do */ 6086 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), 6087 sc->reclaim_idx, 0)) 6088 return true; 6089 6090 /* Compaction cannot yet proceed. Do reclaim. */ 6091 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) 6092 return false; 6093 6094 /* 6095 * Compaction is already possible, but it takes time to run and there 6096 * are potentially other callers using the pages just freed. So proceed 6097 * with reclaim to make a buffer of free pages available to give 6098 * compaction a reasonable chance of completing and allocating the page. 6099 * Note that we won't actually reclaim the whole buffer in one attempt 6100 * as the target watermark in should_continue_reclaim() is lower. But if 6101 * we are already above the high+gap watermark, don't reclaim at all. 6102 */ 6103 watermark = high_wmark_pages(zone) + compact_gap(sc->order); 6104 6105 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); 6106 } 6107 6108 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) 6109 { 6110 /* 6111 * If reclaim is making progress greater than 12% efficiency then 6112 * wake all the NOPROGRESS throttled tasks. 
6113 */ 6114 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { 6115 wait_queue_head_t *wqh; 6116 6117 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; 6118 if (waitqueue_active(wqh)) 6119 wake_up(wqh); 6120 6121 return; 6122 } 6123 6124 /* 6125 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will 6126 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages 6127 * under writeback and marked for immediate reclaim at the tail of the 6128 * LRU. 6129 */ 6130 if (current_is_kswapd() || cgroup_reclaim(sc)) 6131 return; 6132 6133 /* Throttle if making no progress at high priorities. */ 6134 if (sc->priority == 1 && !sc->nr_reclaimed) 6135 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS); 6136 } 6137 6138 /* 6139 * This is the direct reclaim path, for page-allocating processes. We only 6140 * try to reclaim pages from zones which will satisfy the caller's allocation 6141 * request. 6142 * 6143 * If a zone is deemed to be full of pinned pages then just give it a light 6144 * scan then give up on it. 6145 */ 6146 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 6147 { 6148 struct zoneref *z; 6149 struct zone *zone; 6150 unsigned long nr_soft_reclaimed; 6151 unsigned long nr_soft_scanned; 6152 gfp_t orig_mask; 6153 pg_data_t *last_pgdat = NULL; 6154 pg_data_t *first_pgdat = NULL; 6155 6156 /* 6157 * If the number of buffer_heads in the machine exceeds the maximum 6158 * allowed level, force direct reclaim to scan the highmem zone as 6159 * highmem pages could be pinning lowmem pages storing buffer_heads. 6160 */ 6161 orig_mask = sc->gfp_mask; 6162 if (buffer_heads_over_limit) { 6163 sc->gfp_mask |= __GFP_HIGHMEM; 6164 sc->reclaim_idx = gfp_zone(sc->gfp_mask); 6165 } 6166 6167 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6168 sc->reclaim_idx, sc->nodemask) { 6169 /* 6170 * Take care that memory controller reclaiming has only a small 6171 * influence on the global LRU. 6172 */ 6173 if (!cgroup_reclaim(sc)) { 6174 if (!cpuset_zone_allowed(zone, 6175 GFP_KERNEL | __GFP_HARDWALL)) 6176 continue; 6177 6178 /* 6179 * If we already have plenty of memory free for 6180 * compaction in this zone, don't free any more. 6181 * Even though compaction is invoked for any 6182 * non-zero order, only frequent costly order 6183 * reclamation is disruptive enough to become a 6184 * noticeable problem, like transparent huge 6185 * page allocations. 6186 */ 6187 if (IS_ENABLED(CONFIG_COMPACTION) && 6188 sc->order > PAGE_ALLOC_COSTLY_ORDER && 6189 compaction_ready(zone, sc)) { 6190 sc->compaction_ready = true; 6191 continue; 6192 } 6193 6194 /* 6195 * Shrink each node in the zonelist once. If the 6196 * zonelist is ordered by zone (not the default) then a 6197 * node may be shrunk multiple times but in that case 6198 * the user prefers lower zones being preserved. 6199 */ 6200 if (zone->zone_pgdat == last_pgdat) 6201 continue; 6202 6203 /* 6204 * This steals pages from memory cgroups over their soft limit 6205 * and returns the number of reclaimed pages and 6206 * scanned pages. This works for global memory pressure 6207 * and balancing, not for a memcg's limit.
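* (Soft limits are a cgroup v1 feature; under cgroup v2 the soft limit tree stays empty and memcg1_soft_limit_reclaim() finds nothing to do.)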
6208 */ 6209 nr_soft_scanned = 0; 6210 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat, 6211 sc->order, sc->gfp_mask, 6212 &nr_soft_scanned); 6213 sc->nr_reclaimed += nr_soft_reclaimed; 6214 sc->nr_scanned += nr_soft_scanned; 6215 /* need some check for avoid more shrink_zone() */ 6216 } 6217 6218 if (!first_pgdat) 6219 first_pgdat = zone->zone_pgdat; 6220 6221 /* See comment about same check for global reclaim above */ 6222 if (zone->zone_pgdat == last_pgdat) 6223 continue; 6224 last_pgdat = zone->zone_pgdat; 6225 shrink_node(zone->zone_pgdat, sc); 6226 } 6227 6228 if (first_pgdat) 6229 consider_reclaim_throttle(first_pgdat, sc); 6230 6231 /* 6232 * Restore to original mask to avoid the impact on the caller if we 6233 * promoted it to __GFP_HIGHMEM. 6234 */ 6235 sc->gfp_mask = orig_mask; 6236 } 6237 6238 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) 6239 { 6240 struct lruvec *target_lruvec; 6241 unsigned long refaults; 6242 6243 if (lru_gen_enabled()) 6244 return; 6245 6246 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); 6247 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); 6248 target_lruvec->refaults[WORKINGSET_ANON] = refaults; 6249 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); 6250 target_lruvec->refaults[WORKINGSET_FILE] = refaults; 6251 } 6252 6253 /* 6254 * This is the main entry point to direct page reclaim. 6255 * 6256 * If a full scan of the inactive list fails to free enough memory then we 6257 * are "out of memory" and something needs to be killed. 6258 * 6259 * If the caller is !__GFP_FS then the probability of a failure is reasonably 6260 * high - the zone may be full of dirty or under-writeback pages, which this 6261 * caller can't do much about. We kick the writeback threads and take explicit 6262 * naps in the hope that some of these pages can be written. But if the 6263 * allocating task holds filesystem locks which prevent writeout this might not 6264 * work, and the allocation attempt will fail. 6265 * 6266 * returns: 0, if no pages reclaimed 6267 * else, the number of pages reclaimed 6268 */ 6269 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 6270 struct scan_control *sc) 6271 { 6272 int initial_priority = sc->priority; 6273 pg_data_t *last_pgdat; 6274 struct zoneref *z; 6275 struct zone *zone; 6276 retry: 6277 delayacct_freepages_start(); 6278 6279 if (!cgroup_reclaim(sc)) 6280 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 6281 6282 do { 6283 if (!sc->proactive) 6284 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 6285 sc->priority); 6286 sc->nr_scanned = 0; 6287 shrink_zones(zonelist, sc); 6288 6289 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 6290 break; 6291 6292 if (sc->compaction_ready) 6293 break; 6294 6295 /* 6296 * If we're getting trouble reclaiming, start doing 6297 * writepage even in laptop mode. 
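* (sc->priority starts at DEF_PRIORITY and drops by one each pass, so this only kicks in after a few full passes have failed to reclaim enough.)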
6298 */ 6299 if (sc->priority < DEF_PRIORITY - 2) 6300 sc->may_writepage = 1; 6301 } while (--sc->priority >= 0); 6302 6303 last_pgdat = NULL; 6304 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, 6305 sc->nodemask) { 6306 if (zone->zone_pgdat == last_pgdat) 6307 continue; 6308 last_pgdat = zone->zone_pgdat; 6309 6310 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); 6311 6312 if (cgroup_reclaim(sc)) { 6313 struct lruvec *lruvec; 6314 6315 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, 6316 zone->zone_pgdat); 6317 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6318 } 6319 } 6320 6321 delayacct_freepages_end(); 6322 6323 if (sc->nr_reclaimed) 6324 return sc->nr_reclaimed; 6325 6326 /* Aborted reclaim to try compaction? don't OOM, then */ 6327 if (sc->compaction_ready) 6328 return 1; 6329 6330 /* 6331 * In most cases, direct reclaimers can do partial walks 6332 * through the cgroup tree to meet the reclaim goal while 6333 * keeping latency low. Since the iterator state is shared 6334 * among all direct reclaim invocations (to retain fairness 6335 * among cgroups), though, high concurrency can result in 6336 * individual threads not seeing enough cgroups to make 6337 * meaningful forward progress. Avoid false OOMs in this case. 6338 */ 6339 if (!sc->memcg_full_walk) { 6340 sc->priority = initial_priority; 6341 sc->memcg_full_walk = 1; 6342 goto retry; 6343 } 6344 6345 /* 6346 * We make inactive:active ratio decisions based on the node's 6347 * composition of memory, but a restrictive reclaim_idx or a 6348 * memory.low cgroup setting can exempt large amounts of 6349 * memory from reclaim. Neither of which are very common, so 6350 * instead of doing costly eligibility calculations of the 6351 * entire cgroup subtree up front, we assume the estimates are 6352 * good, and retry with forcible deactivation if that fails. 6353 */ 6354 if (sc->skipped_deactivate) { 6355 sc->priority = initial_priority; 6356 sc->force_deactivate = 1; 6357 sc->skipped_deactivate = 0; 6358 goto retry; 6359 } 6360 6361 /* Untapped cgroup reserves? Don't OOM, retry. 
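* Cgroups below memory.low were passed over earlier (memcg_low_skipped); retry from the initial priority with memcg_low_reclaim set so the protected memory can be reclaimed before declaring OOM.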
*/ 6362 if (sc->memcg_low_skipped) { 6363 sc->priority = initial_priority; 6364 sc->force_deactivate = 0; 6365 sc->memcg_low_reclaim = 1; 6366 sc->memcg_low_skipped = 0; 6367 goto retry; 6368 } 6369 6370 return 0; 6371 } 6372 6373 static bool allow_direct_reclaim(pg_data_t *pgdat) 6374 { 6375 struct zone *zone; 6376 unsigned long pfmemalloc_reserve = 0; 6377 unsigned long free_pages = 0; 6378 int i; 6379 bool wmark_ok; 6380 6381 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6382 return true; 6383 6384 for (i = 0; i <= ZONE_NORMAL; i++) { 6385 zone = &pgdat->node_zones[i]; 6386 if (!managed_zone(zone)) 6387 continue; 6388 6389 if (!zone_reclaimable_pages(zone)) 6390 continue; 6391 6392 pfmemalloc_reserve += min_wmark_pages(zone); 6393 free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); 6394 } 6395 6396 /* If there are no reserves (unexpected config) then do not throttle */ 6397 if (!pfmemalloc_reserve) 6398 return true; 6399 6400 wmark_ok = free_pages > pfmemalloc_reserve / 2; 6401 6402 /* kswapd must be awake if processes are being throttled */ 6403 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 6404 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) 6405 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); 6406 6407 wake_up_interruptible(&pgdat->kswapd_wait); 6408 } 6409 6410 return wmark_ok; 6411 } 6412 6413 /* 6414 * Throttle direct reclaimers if backing storage is backed by the network 6415 * and the PFMEMALLOC reserve for the preferred node is getting dangerously 6416 * depleted. kswapd will continue to make progress and wake the processes 6417 * when the low watermark is reached. 6418 * 6419 * Returns true if a fatal signal was delivered during throttling. If this 6420 * happens, the page allocator should not consider triggering the OOM killer. 6421 */ 6422 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 6423 nodemask_t *nodemask) 6424 { 6425 struct zoneref *z; 6426 struct zone *zone; 6427 pg_data_t *pgdat = NULL; 6428 6429 /* 6430 * Kernel threads should not be throttled as they may be indirectly 6431 * responsible for cleaning pages necessary for reclaim to make forward 6432 * progress. kjournald for example may enter direct reclaim while 6433 * committing a transaction where throttling it could forcing other 6434 * processes to block on log_wait_commit(). 6435 */ 6436 if (current->flags & PF_KTHREAD) 6437 goto out; 6438 6439 /* 6440 * If a fatal signal is pending, this process should not throttle. 6441 * It should return quickly so it can exit and free its memory 6442 */ 6443 if (fatal_signal_pending(current)) 6444 goto out; 6445 6446 /* 6447 * Check if the pfmemalloc reserves are ok by finding the first node 6448 * with a usable ZONE_NORMAL or lower zone. The expectation is that 6449 * GFP_KERNEL will be required for allocating network buffers when 6450 * swapping over the network so ZONE_HIGHMEM is unusable. 6451 * 6452 * Throttling is based on the first usable node and throttled processes 6453 * wait on a queue until kswapd makes progress and wakes them. There 6454 * is an affinity then between processes waking up and where reclaim 6455 * progress has been made assuming the process wakes on the same node. 6456 * More importantly, processes running on remote nodes will not compete 6457 * for remote pfmemalloc reserves and processes on different nodes 6458 * should make reasonable progress. 
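* Throttling kicks in once free pages across the node's ZONE_NORMAL and lower zones fall below half of their summed min watermarks (see allow_direct_reclaim()).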
6459 */ 6460 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6461 gfp_zone(gfp_mask), nodemask) { 6462 if (zone_idx(zone) > ZONE_NORMAL) 6463 continue; 6464 6465 /* Throttle based on the first usable node */ 6466 pgdat = zone->zone_pgdat; 6467 if (allow_direct_reclaim(pgdat)) 6468 goto out; 6469 break; 6470 } 6471 6472 /* If no zone was usable by the allocation flags then do not throttle */ 6473 if (!pgdat) 6474 goto out; 6475 6476 /* Account for the throttling */ 6477 count_vm_event(PGSCAN_DIRECT_THROTTLE); 6478 6479 /* 6480 * If the caller cannot enter the filesystem, it's possible that it 6481 * is due to the caller holding an FS lock or performing a journal 6482 * transaction in the case of a filesystem like ext[3|4]. In this case, 6483 * it is not safe to block on pfmemalloc_wait as kswapd could be 6484 * blocked waiting on the same lock. Instead, throttle for up to a 6485 * second before continuing. 6486 */ 6487 if (!(gfp_mask & __GFP_FS)) 6488 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 6489 allow_direct_reclaim(pgdat), HZ); 6490 else 6491 /* Throttle until kswapd wakes the process */ 6492 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 6493 allow_direct_reclaim(pgdat)); 6494 6495 if (fatal_signal_pending(current)) 6496 return true; 6497 6498 out: 6499 return false; 6500 } 6501 6502 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 6503 gfp_t gfp_mask, nodemask_t *nodemask) 6504 { 6505 unsigned long nr_reclaimed; 6506 struct scan_control sc = { 6507 .nr_to_reclaim = SWAP_CLUSTER_MAX, 6508 .gfp_mask = current_gfp_context(gfp_mask), 6509 .reclaim_idx = gfp_zone(gfp_mask), 6510 .order = order, 6511 .nodemask = nodemask, 6512 .priority = DEF_PRIORITY, 6513 .may_writepage = !laptop_mode, 6514 .may_unmap = 1, 6515 .may_swap = 1, 6516 }; 6517 6518 /* 6519 * scan_control uses s8 fields for order, priority, and reclaim_idx. 6520 * Confirm they are large enough for max values. 6521 */ 6522 BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX); 6523 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); 6524 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); 6525 6526 /* 6527 * Do not enter reclaim if fatal signal was delivered while throttled. 6528 * 1 is returned so that the page allocator does not OOM kill at this 6529 * point. 6530 */ 6531 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) 6532 return 1; 6533 6534 set_task_reclaim_state(current, &sc.reclaim_state); 6535 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); 6536 6537 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6538 6539 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 6540 set_task_reclaim_state(current, NULL); 6541 6542 return nr_reclaimed; 6543 } 6544 6545 #ifdef CONFIG_MEMCG 6546 6547 /* Only used by soft limit reclaim. Do not reuse for anything else. 
*/ 6548 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 6549 gfp_t gfp_mask, bool noswap, 6550 pg_data_t *pgdat, 6551 unsigned long *nr_scanned) 6552 { 6553 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 6554 struct scan_control sc = { 6555 .nr_to_reclaim = SWAP_CLUSTER_MAX, 6556 .target_mem_cgroup = memcg, 6557 .may_writepage = !laptop_mode, 6558 .may_unmap = 1, 6559 .reclaim_idx = MAX_NR_ZONES - 1, 6560 .may_swap = !noswap, 6561 }; 6562 6563 WARN_ON_ONCE(!current->reclaim_state); 6564 6565 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 6566 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 6567 6568 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 6569 sc.gfp_mask); 6570 6571 /* 6572 * NOTE: Although we can get the priority field, using it 6573 * here is not a good idea, since it limits the pages we can scan. 6574 * if we don't reclaim here, the shrink_node from balance_pgdat 6575 * will pick up pages from other mem cgroup's as well. We hack 6576 * the priority and make it zero. 6577 */ 6578 shrink_lruvec(lruvec, &sc); 6579 6580 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 6581 6582 *nr_scanned = sc.nr_scanned; 6583 6584 return sc.nr_reclaimed; 6585 } 6586 6587 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 6588 unsigned long nr_pages, 6589 gfp_t gfp_mask, 6590 unsigned int reclaim_options, 6591 int *swappiness) 6592 { 6593 unsigned long nr_reclaimed; 6594 unsigned int noreclaim_flag; 6595 struct scan_control sc = { 6596 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 6597 .proactive_swappiness = swappiness, 6598 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | 6599 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 6600 .reclaim_idx = MAX_NR_ZONES - 1, 6601 .target_mem_cgroup = memcg, 6602 .priority = DEF_PRIORITY, 6603 .may_writepage = !laptop_mode, 6604 .may_unmap = 1, 6605 .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP), 6606 .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE), 6607 }; 6608 /* 6609 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put 6610 * equal pressure on all the nodes. This is based on the assumption that 6611 * the reclaim does not bail out early. 
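* (Without __GFP_THISNODE in the mask, node_zonelist() below hands back the node's ZONELIST_FALLBACK list, so every allowed node is eligible.)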
6612 */ 6613 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 6614 6615 set_task_reclaim_state(current, &sc.reclaim_state); 6616 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); 6617 noreclaim_flag = memalloc_noreclaim_save(); 6618 6619 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6620 6621 memalloc_noreclaim_restore(noreclaim_flag); 6622 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 6623 set_task_reclaim_state(current, NULL); 6624 6625 return nr_reclaimed; 6626 } 6627 #endif 6628 6629 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) 6630 { 6631 struct mem_cgroup *memcg; 6632 struct lruvec *lruvec; 6633 6634 if (lru_gen_enabled()) { 6635 lru_gen_age_node(pgdat, sc); 6636 return; 6637 } 6638 6639 if (!can_age_anon_pages(pgdat, sc)) 6640 return; 6641 6642 lruvec = mem_cgroup_lruvec(NULL, pgdat); 6643 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 6644 return; 6645 6646 memcg = mem_cgroup_iter(NULL, NULL, NULL); 6647 do { 6648 lruvec = mem_cgroup_lruvec(memcg, pgdat); 6649 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 6650 sc, LRU_ACTIVE_ANON); 6651 memcg = mem_cgroup_iter(NULL, memcg, NULL); 6652 } while (memcg); 6653 } 6654 6655 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) 6656 { 6657 int i; 6658 struct zone *zone; 6659 6660 /* 6661 * Check for watermark boosts top-down as the higher zones 6662 * are more likely to be boosted. Both watermarks and boosts 6663 * should not be checked at the same time as reclaim would 6664 * start prematurely when there is no boosting and a lower 6665 * zone is balanced. 6666 */ 6667 for (i = highest_zoneidx; i >= 0; i--) { 6668 zone = pgdat->node_zones + i; 6669 if (!managed_zone(zone)) 6670 continue; 6671 6672 if (zone->watermark_boost) 6673 return true; 6674 } 6675 6676 return false; 6677 } 6678 6679 /* 6680 * Returns true if there is an eligible zone balanced for the request order 6681 * and highest_zoneidx 6682 */ 6683 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) 6684 { 6685 int i; 6686 unsigned long mark = -1; 6687 struct zone *zone; 6688 6689 /* 6690 * Check watermarks bottom-up as lower zones are more likely to 6691 * meet watermarks. 6692 */ 6693 for (i = 0; i <= highest_zoneidx; i++) { 6694 zone = pgdat->node_zones + i; 6695 6696 if (!managed_zone(zone)) 6697 continue; 6698 6699 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) 6700 mark = promo_wmark_pages(zone); 6701 else 6702 mark = high_wmark_pages(zone); 6703 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) 6704 return true; 6705 } 6706 6707 /* 6708 * If a node has no managed zone within highest_zoneidx, it does not 6709 * need balancing by definition. This can happen if a zone-restricted 6710 * allocation tries to wake a remote kswapd. 6711 */ 6712 if (mark == -1) 6713 return true; 6714 6715 return false; 6716 } 6717 6718 /* Clear pgdat state for congested, dirty or under writeback. */ 6719 static void clear_pgdat_congested(pg_data_t *pgdat) 6720 { 6721 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); 6722 6723 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); 6724 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6725 clear_bit(PGDAT_DIRTY, &pgdat->flags); 6726 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); 6727 } 6728 6729 /* 6730 * Prepare kswapd for sleeping. This verifies that there are no processes 6731 * waiting in throttle_direct_reclaim() and that watermarks have been met. 
6732 * 6733 * Returns true if kswapd is ready to sleep 6734 */ 6735 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, 6736 int highest_zoneidx) 6737 { 6738 /* 6739 * The throttled processes are normally woken up in balance_pgdat() as 6740 * soon as allow_direct_reclaim() is true. But there is a potential 6741 * race between when kswapd checks the watermarks and a process gets 6742 * throttled. There is also a potential race if processes get 6743 * throttled, kswapd wakes, a large process exits thereby balancing the 6744 * zones, which causes kswapd to exit balance_pgdat() before reaching 6745 * the wake up checks. If kswapd is going to sleep, no process should 6746 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 6747 * the wake up is premature, processes will wake kswapd and get 6748 * throttled again. The difference from wake ups in balance_pgdat() is 6749 * that here we are under prepare_to_wait(). 6750 */ 6751 if (waitqueue_active(&pgdat->pfmemalloc_wait)) 6752 wake_up_all(&pgdat->pfmemalloc_wait); 6753 6754 /* Hopeless node, leave it to direct reclaim */ 6755 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6756 return true; 6757 6758 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { 6759 clear_pgdat_congested(pgdat); 6760 return true; 6761 } 6762 6763 return false; 6764 } 6765 6766 /* 6767 * kswapd shrinks a node of pages that are at or below the highest usable 6768 * zone that is currently unbalanced. 6769 * 6770 * Returns true if kswapd scanned at least the requested number of pages to 6771 * reclaim or if the lack of progress was due to pages under writeback. 6772 * This is used to determine if the scanning priority needs to be raised. 6773 */ 6774 static bool kswapd_shrink_node(pg_data_t *pgdat, 6775 struct scan_control *sc) 6776 { 6777 struct zone *zone; 6778 int z; 6779 unsigned long nr_reclaimed = sc->nr_reclaimed; 6780 6781 /* Reclaim a number of pages proportional to the number of zones */ 6782 sc->nr_to_reclaim = 0; 6783 for (z = 0; z <= sc->reclaim_idx; z++) { 6784 zone = pgdat->node_zones + z; 6785 if (!managed_zone(zone)) 6786 continue; 6787 6788 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 6789 } 6790 6791 /* 6792 * Historically care was taken to put equal pressure on all zones but 6793 * now pressure is applied based on node LRU order. 6794 */ 6795 shrink_node(pgdat, sc); 6796 6797 /* 6798 * Fragmentation may mean that the system cannot be rebalanced for 6799 * high-order allocations. If twice the allocation size has been 6800 * reclaimed then recheck watermarks only at order-0 to prevent 6801 * excessive reclaim. Assume that a process requested a high-order 6802 * can direct reclaim/compact. 6803 */ 6804 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) 6805 sc->order = 0; 6806 6807 /* account for progress from mm_account_reclaimed_pages() */ 6808 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; 6809 } 6810 6811 /* Page allocator PCP high watermark is lowered if reclaim is active. 
*/ 6812 static inline void 6813 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) 6814 { 6815 int i; 6816 struct zone *zone; 6817 6818 for (i = 0; i <= highest_zoneidx; i++) { 6819 zone = pgdat->node_zones + i; 6820 6821 if (!managed_zone(zone)) 6822 continue; 6823 6824 if (active) 6825 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6826 else 6827 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6828 } 6829 } 6830 6831 static inline void 6832 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6833 { 6834 update_reclaim_active(pgdat, highest_zoneidx, true); 6835 } 6836 6837 static inline void 6838 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6839 { 6840 update_reclaim_active(pgdat, highest_zoneidx, false); 6841 } 6842 6843 /* 6844 * For kswapd, balance_pgdat() will reclaim pages across a node from zones 6845 * that are eligible for use by the caller until at least one zone is 6846 * balanced. 6847 * 6848 * Returns the order kswapd finished reclaiming at. 6849 * 6850 * kswapd scans the zones in the highmem->normal->dma direction. It skips 6851 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 6852 * found to have free_pages <= high_wmark_pages(zone), any page in that zone 6853 * or lower is eligible for reclaim until at least one usable zone is 6854 * balanced. 6855 */ 6856 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) 6857 { 6858 int i; 6859 unsigned long nr_soft_reclaimed; 6860 unsigned long nr_soft_scanned; 6861 unsigned long pflags; 6862 unsigned long nr_boost_reclaim; 6863 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; 6864 bool boosted; 6865 struct zone *zone; 6866 struct scan_control sc = { 6867 .gfp_mask = GFP_KERNEL, 6868 .order = order, 6869 .may_unmap = 1, 6870 }; 6871 6872 set_task_reclaim_state(current, &sc.reclaim_state); 6873 psi_memstall_enter(&pflags); 6874 __fs_reclaim_acquire(_THIS_IP_); 6875 6876 count_vm_event(PAGEOUTRUN); 6877 6878 /* 6879 * Account for the reclaim boost. Note that the zone boost is left in 6880 * place so that parallel allocations that are near the watermark will 6881 * stall or direct reclaim until kswapd is finished. 6882 */ 6883 nr_boost_reclaim = 0; 6884 for (i = 0; i <= highest_zoneidx; i++) { 6885 zone = pgdat->node_zones + i; 6886 if (!managed_zone(zone)) 6887 continue; 6888 6889 nr_boost_reclaim += zone->watermark_boost; 6890 zone_boosts[i] = zone->watermark_boost; 6891 } 6892 boosted = nr_boost_reclaim; 6893 6894 restart: 6895 set_reclaim_active(pgdat, highest_zoneidx); 6896 sc.priority = DEF_PRIORITY; 6897 do { 6898 unsigned long nr_reclaimed = sc.nr_reclaimed; 6899 bool raise_priority = true; 6900 bool balanced; 6901 bool ret; 6902 bool was_frozen; 6903 6904 sc.reclaim_idx = highest_zoneidx; 6905 6906 /* 6907 * If the number of buffer_heads exceeds the maximum allowed 6908 * then consider reclaiming from all zones. This has a dual 6909 * purpose -- on 64-bit systems it is expected that 6910 * buffer_heads are stripped during active rotation. On 32-bit 6911 * systems, highmem pages can pin lowmem memory and shrinking 6912 * buffers can relieve lowmem pressure. Reclaim may still not 6913 * go ahead if all eligible zones for the original allocation 6914 * request are balanced to avoid excessive reclaim from kswapd. 
6915 */ 6916 if (buffer_heads_over_limit) { 6917 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 6918 zone = pgdat->node_zones + i; 6919 if (!managed_zone(zone)) 6920 continue; 6921 6922 sc.reclaim_idx = i; 6923 break; 6924 } 6925 } 6926 6927 /* 6928 * If the pgdat is imbalanced then ignore boosting and preserve 6929 * the watermarks for a later time and restart. Note that the 6930 * zone watermarks will be still reset at the end of balancing 6931 * on the grounds that the normal reclaim should be enough to 6932 * re-evaluate if boosting is required when kswapd next wakes. 6933 */ 6934 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); 6935 if (!balanced && nr_boost_reclaim) { 6936 nr_boost_reclaim = 0; 6937 goto restart; 6938 } 6939 6940 /* 6941 * If boosting is not active then only reclaim if there are no 6942 * eligible zones. Note that sc.reclaim_idx is not used as 6943 * buffer_heads_over_limit may have adjusted it. 6944 */ 6945 if (!nr_boost_reclaim && balanced) 6946 goto out; 6947 6948 /* Limit the priority of boosting to avoid reclaim writeback */ 6949 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) 6950 raise_priority = false; 6951 6952 /* 6953 * Do not writeback or swap pages for boosted reclaim. The 6954 * intent is to relieve pressure not issue sub-optimal IO 6955 * from reclaim context. If no pages are reclaimed, the 6956 * reclaim will be aborted. 6957 */ 6958 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 6959 sc.may_swap = !nr_boost_reclaim; 6960 6961 /* 6962 * Do some background aging, to give pages a chance to be 6963 * referenced before reclaiming. All pages are rotated 6964 * regardless of classzone as this is about consistent aging. 6965 */ 6966 kswapd_age_node(pgdat, &sc); 6967 6968 /* 6969 * If we're getting trouble reclaiming, start doing writepage 6970 * even in laptop mode. 6971 */ 6972 if (sc.priority < DEF_PRIORITY - 2) 6973 sc.may_writepage = 1; 6974 6975 /* Call soft limit reclaim before calling shrink_node. */ 6976 sc.nr_scanned = 0; 6977 nr_soft_scanned = 0; 6978 nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order, 6979 sc.gfp_mask, &nr_soft_scanned); 6980 sc.nr_reclaimed += nr_soft_reclaimed; 6981 6982 /* 6983 * There should be no need to raise the scanning priority if 6984 * enough pages are already being scanned that that high 6985 * watermark would be met at 100% efficiency. 6986 */ 6987 if (kswapd_shrink_node(pgdat, &sc)) 6988 raise_priority = false; 6989 6990 /* 6991 * If the low watermark is met there is no need for processes 6992 * to be throttled on pfmemalloc_wait as they should not be 6993 * able to safely make forward progress. Wake them 6994 */ 6995 if (waitqueue_active(&pgdat->pfmemalloc_wait) && 6996 allow_direct_reclaim(pgdat)) 6997 wake_up_all(&pgdat->pfmemalloc_wait); 6998 6999 /* Check if kswapd should be suspending */ 7000 __fs_reclaim_release(_THIS_IP_); 7001 ret = kthread_freezable_should_stop(&was_frozen); 7002 __fs_reclaim_acquire(_THIS_IP_); 7003 if (was_frozen || ret) 7004 break; 7005 7006 /* 7007 * Raise priority if scanning rate is too low or there was no 7008 * progress in reclaiming pages 7009 */ 7010 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; 7011 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); 7012 7013 /* 7014 * If reclaim made no progress for a boost, stop reclaim as 7015 * IO cannot be queued and it could be an infinite loop in 7016 * extreme circumstances. 
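* (Boosted reclaim runs with may_writepage and may_swap cleared, so once the clean page cache is exhausted it cannot make further progress.)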
7017 */ 7018 if (nr_boost_reclaim && !nr_reclaimed) 7019 break; 7020 7021 if (raise_priority || !nr_reclaimed) 7022 sc.priority--; 7023 } while (sc.priority >= 1); 7024 7025 /* 7026 * Restart only if it went through the priority loop all the way, 7027 * but cache_trim_mode didn't work. 7028 */ 7029 if (!sc.nr_reclaimed && sc.priority < 1 && 7030 !sc.no_cache_trim_mode && sc.cache_trim_mode_failed) { 7031 sc.no_cache_trim_mode = 1; 7032 goto restart; 7033 } 7034 7035 if (!sc.nr_reclaimed) 7036 pgdat->kswapd_failures++; 7037 7038 out: 7039 clear_reclaim_active(pgdat, highest_zoneidx); 7040 7041 /* If reclaim was boosted, account for the reclaim done in this pass */ 7042 if (boosted) { 7043 unsigned long flags; 7044 7045 for (i = 0; i <= highest_zoneidx; i++) { 7046 if (!zone_boosts[i]) 7047 continue; 7048 7049 /* Increments are under the zone lock */ 7050 zone = pgdat->node_zones + i; 7051 spin_lock_irqsave(&zone->lock, flags); 7052 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); 7053 spin_unlock_irqrestore(&zone->lock, flags); 7054 } 7055 7056 /* 7057 * As there is now likely space, wakeup kcompact to defragment 7058 * pageblocks. 7059 */ 7060 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); 7061 } 7062 7063 snapshot_refaults(NULL, pgdat); 7064 __fs_reclaim_release(_THIS_IP_); 7065 psi_memstall_leave(&pflags); 7066 set_task_reclaim_state(current, NULL); 7067 7068 /* 7069 * Return the order kswapd stopped reclaiming at as 7070 * prepare_kswapd_sleep() takes it into account. If another caller 7071 * entered the allocator slow path while kswapd was awake, order will 7072 * remain at the higher level. 7073 */ 7074 return sc.order; 7075 } 7076 7077 /* 7078 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to 7079 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is 7080 * not a valid index then either kswapd runs for first time or kswapd couldn't 7081 * sleep after previous reclaim attempt (node is still unbalanced). In that 7082 * case return the zone index of the previous kswapd reclaim cycle. 7083 */ 7084 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, 7085 enum zone_type prev_highest_zoneidx) 7086 { 7087 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 7088 7089 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx; 7090 } 7091 7092 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 7093 unsigned int highest_zoneidx) 7094 { 7095 long remaining = 0; 7096 DEFINE_WAIT(wait); 7097 7098 if (freezing(current) || kthread_should_stop()) 7099 return; 7100 7101 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7102 7103 /* 7104 * Try to sleep for a short interval. Note that kcompactd will only be 7105 * woken if it is possible to sleep for a short interval. This is 7106 * deliberate on the assumption that if reclaim cannot keep an 7107 * eligible zone balanced that it's also unlikely that compaction will 7108 * succeed. 7109 */ 7110 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7111 /* 7112 * Compaction records what page blocks it recently failed to 7113 * isolate pages from and skips them in the future scanning. 7114 * When kswapd is going to sleep, it is reasonable to assume 7115 * that pages and compaction may succeed so reset the cache. 
7116 */ 7117 reset_isolation_suitable(pgdat); 7118 7119 /* 7120 * We have freed the memory, now we should compact it to make 7121 * allocation of the requested order possible. 7122 */ 7123 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); 7124 7125 remaining = schedule_timeout(HZ/10); 7126 7127 /* 7128 * If woken prematurely then reset kswapd_highest_zoneidx and 7129 * order. The values will either be from a wakeup request or 7130 * the previous request that slept prematurely. 7131 */ 7132 if (remaining) { 7133 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, 7134 kswapd_highest_zoneidx(pgdat, 7135 highest_zoneidx)); 7136 7137 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) 7138 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); 7139 } 7140 7141 finish_wait(&pgdat->kswapd_wait, &wait); 7142 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7143 } 7144 7145 /* 7146 * After a short sleep, check if it was a premature sleep. If not, then 7147 * go fully to sleep until explicitly woken up. 7148 */ 7149 if (!remaining && 7150 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7151 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 7152 7153 /* 7154 * vmstat counters are not perfectly accurate and the estimated 7155 * value for counters such as NR_FREE_PAGES can deviate from the 7156 * true value by nr_online_cpus * threshold. To avoid the zone 7157 * watermarks being breached while under pressure, we reduce the 7158 * per-cpu vmstat threshold while kswapd is awake and restore 7159 * them before going back to sleep. 7160 */ 7161 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 7162 7163 if (!kthread_should_stop()) 7164 schedule(); 7165 7166 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 7167 } else { 7168 if (remaining) 7169 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 7170 else 7171 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 7172 } 7173 finish_wait(&pgdat->kswapd_wait, &wait); 7174 } 7175 7176 /* 7177 * The background pageout daemon, started as a kernel thread 7178 * from the init process. 7179 * 7180 * This basically trickles out pages so that we have _some_ 7181 * free memory available even if there is no other activity 7182 * that frees anything up. This is needed for things like routing 7183 * etc, where we otherwise might have all activity going on in 7184 * asynchronous contexts that cannot page things out. 7185 * 7186 * If there are applications that are active memory-allocators 7187 * (most normal use), this basically shouldn't matter. 7188 */ 7189 static int kswapd(void *p) 7190 { 7191 unsigned int alloc_order, reclaim_order; 7192 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; 7193 pg_data_t *pgdat = (pg_data_t *)p; 7194 struct task_struct *tsk = current; 7195 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 7196 7197 if (!cpumask_empty(cpumask)) 7198 set_cpus_allowed_ptr(tsk, cpumask); 7199 7200 /* 7201 * Tell the memory management that we're a "memory allocator", 7202 * and that if we need more memory we should get access to it 7203 * regardless (see "__alloc_pages()"). "kswapd" should 7204 * never get caught in the normal page freeing logic. 7205 * 7206 * (Kswapd normally doesn't need memory anyway, but sometimes 7207 * you need a small amount of memory in order to be able to 7208 * page out something else, and this flag essentially protects 7209 * us from recursively trying to free more memory as we're 7210 * trying to free the first piece of memory in the first place). 
7211 */ 7212 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; 7213 set_freezable(); 7214 7215 WRITE_ONCE(pgdat->kswapd_order, 0); 7216 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7217 atomic_set(&pgdat->nr_writeback_throttled, 0); 7218 for ( ; ; ) { 7219 bool was_frozen; 7220 7221 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); 7222 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7223 highest_zoneidx); 7224 7225 kswapd_try_sleep: 7226 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 7227 highest_zoneidx); 7228 7229 /* Read the new order and highest_zoneidx */ 7230 alloc_order = READ_ONCE(pgdat->kswapd_order); 7231 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7232 highest_zoneidx); 7233 WRITE_ONCE(pgdat->kswapd_order, 0); 7234 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7235 7236 if (kthread_freezable_should_stop(&was_frozen)) 7237 break; 7238 7239 /* 7240 * We can speed up thawing tasks if we don't call balance_pgdat 7241 * after returning from the refrigerator 7242 */ 7243 if (was_frozen) 7244 continue; 7245 7246 /* 7247 * Reclaim begins at the requested order but if a high-order 7248 * reclaim fails then kswapd falls back to reclaiming for 7249 * order-0. If that happens, kswapd will consider sleeping 7250 * for the order it finished reclaiming at (reclaim_order) 7251 * but kcompactd is woken to compact for the original 7252 * request (alloc_order). 7253 */ 7254 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, 7255 alloc_order); 7256 reclaim_order = balance_pgdat(pgdat, alloc_order, 7257 highest_zoneidx); 7258 if (reclaim_order < alloc_order) 7259 goto kswapd_try_sleep; 7260 } 7261 7262 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); 7263 7264 return 0; 7265 } 7266 7267 /* 7268 * A zone is low on free memory or too fragmented for high-order memory. If 7269 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's 7270 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim 7271 * has failed or is not needed, still wake up kcompactd if only compaction is 7272 * needed. 7273 */ 7274 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 7275 enum zone_type highest_zoneidx) 7276 { 7277 pg_data_t *pgdat; 7278 enum zone_type curr_idx; 7279 7280 if (!managed_zone(zone)) 7281 return; 7282 7283 if (!cpuset_zone_allowed(zone, gfp_flags)) 7284 return; 7285 7286 pgdat = zone->zone_pgdat; 7287 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 7288 7289 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) 7290 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); 7291 7292 if (READ_ONCE(pgdat->kswapd_order) < order) 7293 WRITE_ONCE(pgdat->kswapd_order, order); 7294 7295 if (!waitqueue_active(&pgdat->kswapd_wait)) 7296 return; 7297 7298 /* Hopeless node, leave it to direct reclaim if possible */ 7299 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 7300 (pgdat_balanced(pgdat, order, highest_zoneidx) && 7301 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { 7302 /* 7303 * There may be plenty of free memory available, but it's too 7304 * fragmented for high-order allocations. Wake up kcompactd 7305 * and rely on compaction_suitable() to determine if it's 7306 * needed. If it fails, it will defer subsequent attempts to 7307 * ratelimit its work. 
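* Only callers that cannot enter direct reclaim (!__GFP_DIRECT_RECLAIM) get this kick; callers that can will reach compaction themselves via the allocator slow path.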
7308 */ 7309 if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 7310 wakeup_kcompactd(pgdat, order, highest_zoneidx); 7311 return; 7312 } 7313 7314 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, 7315 gfp_flags); 7316 wake_up_interruptible(&pgdat->kswapd_wait); 7317 } 7318 7319 #ifdef CONFIG_HIBERNATION 7320 /* 7321 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 7322 * freed pages. 7323 * 7324 * Rather than trying to age LRUs the aim is to preserve the overall 7325 * LRU order by reclaiming preferentially 7326 * inactive > active > active referenced > active mapped 7327 */ 7328 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 7329 { 7330 struct scan_control sc = { 7331 .nr_to_reclaim = nr_to_reclaim, 7332 .gfp_mask = GFP_HIGHUSER_MOVABLE, 7333 .reclaim_idx = MAX_NR_ZONES - 1, 7334 .priority = DEF_PRIORITY, 7335 .may_writepage = 1, 7336 .may_unmap = 1, 7337 .may_swap = 1, 7338 .hibernation_mode = 1, 7339 }; 7340 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 7341 unsigned long nr_reclaimed; 7342 unsigned int noreclaim_flag; 7343 7344 fs_reclaim_acquire(sc.gfp_mask); 7345 noreclaim_flag = memalloc_noreclaim_save(); 7346 set_task_reclaim_state(current, &sc.reclaim_state); 7347 7348 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 7349 7350 set_task_reclaim_state(current, NULL); 7351 memalloc_noreclaim_restore(noreclaim_flag); 7352 fs_reclaim_release(sc.gfp_mask); 7353 7354 return nr_reclaimed; 7355 } 7356 #endif /* CONFIG_HIBERNATION */ 7357 7358 /* 7359 * This kswapd start function will be called by init and node-hot-add. 7360 */ 7361 void __meminit kswapd_run(int nid) 7362 { 7363 pg_data_t *pgdat = NODE_DATA(nid); 7364 7365 pgdat_kswapd_lock(pgdat); 7366 if (!pgdat->kswapd) { 7367 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 7368 if (IS_ERR(pgdat->kswapd)) { 7369 /* failure at boot is fatal */ 7370 pr_err("Failed to start kswapd on node %d,ret=%ld\n", 7371 nid, PTR_ERR(pgdat->kswapd)); 7372 BUG_ON(system_state < SYSTEM_RUNNING); 7373 pgdat->kswapd = NULL; 7374 } 7375 } 7376 pgdat_kswapd_unlock(pgdat); 7377 } 7378 7379 /* 7380 * Called by memory hotplug when all memory in a node is offlined. Caller must 7381 * be holding mem_hotplug_begin/done(). 7382 */ 7383 void __meminit kswapd_stop(int nid) 7384 { 7385 pg_data_t *pgdat = NODE_DATA(nid); 7386 struct task_struct *kswapd; 7387 7388 pgdat_kswapd_lock(pgdat); 7389 kswapd = pgdat->kswapd; 7390 if (kswapd) { 7391 kthread_stop(kswapd); 7392 pgdat->kswapd = NULL; 7393 } 7394 pgdat_kswapd_unlock(pgdat); 7395 } 7396 7397 static int __init kswapd_init(void) 7398 { 7399 int nid; 7400 7401 swap_setup(); 7402 for_each_node_state(nid, N_MEMORY) 7403 kswapd_run(nid); 7404 return 0; 7405 } 7406 7407 module_init(kswapd_init) 7408 7409 #ifdef CONFIG_NUMA 7410 /* 7411 * Node reclaim mode 7412 * 7413 * If non-zero call node_reclaim when the number of free pages falls below 7414 * the watermarks. 7415 */ 7416 int node_reclaim_mode __read_mostly; 7417 7418 /* 7419 * Priority for NODE_RECLAIM. This determines the fraction of pages 7420 * of a node considered for each zone_reclaim. 4 scans 1/16th of 7421 * a zone. 7422 */ 7423 #define NODE_RECLAIM_PRIORITY 4 7424 7425 /* 7426 * Percentage of pages in a zone that must be unmapped for node_reclaim to 7427 * occur. 7428 */ 7429 int sysctl_min_unmapped_ratio = 1; 7430 7431 /* 7432 * If the number of slab pages in a zone grows beyond this percentage then 7433 * slab reclaim needs to occur. 
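* With the default of 5, pgdat->min_slab_pages works out to 5% of the node's managed pages; only reclaimable slab above that threshold lets node reclaim run.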

/*
 * Priority for NODE_RECLAIM. This determines the fraction of pages
 * of a node considered in each node reclaim pass. A value of 4 scans
 * 1/16th of the node.
 */
#define NODE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for node_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
{
	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
		node_page_state(pgdat, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
{
	unsigned long nr_pagecache_reclaimable;
	unsigned long delta = 0;

	/*
	 * If RECLAIM_UNMAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and node_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (node_reclaim_mode & RECLAIM_UNMAP)
		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(node_reclaim_mode & RECLAIM_WRITE))
		delta += node_page_state(pgdat, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
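
/*
 * Sizing example (illustrative numbers): for an order-3 allocation,
 * nr_pages = 1 << 3 = 8 in __node_reclaim() below, so nr_to_reclaim is
 * max(8, SWAP_CLUSTER_MAX) = 32 pages, and scanning starts at priority
 * NODE_RECLAIM_PRIORITY (4), i.e. roughly 1/16th of the node's LRU pages
 * per pass, with pressure increasing (priority decreasing) until enough
 * pages have been reclaimed or priority reaches zero.
 */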

/*
 * Try to free up some pages from this node through reclaim.
 */
static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(gfp_mask),
		.order = order,
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
		.may_swap = 1,
		.reclaim_idx = gfp_zone(gfp_mask),
	};
	unsigned long pflags;

	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
					   sc.gfp_mask);

	cond_resched();
	psi_memstall_enter(&pflags);
	delayacct_freepages_start();
	fs_reclaim_acquire(sc.gfp_mask);
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	set_task_reclaim_state(p, &sc.reclaim_state);

	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
		/*
		 * Free memory by calling shrink_node() with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_node(pgdat, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	set_task_reclaim_state(p, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);
	psi_memstall_leave(&pflags);
	delayacct_freepages_end();

	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);

	return sc.nr_reclaimed >= nr_pages;
}

int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	int ret;

	/*
	 * Node reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the node is overallocated. So we do not reclaim
	 * if less than a specified percentage of the node is used by
	 * unmapped file backed pages.
	 */
	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
	    pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
		return NODE_RECLAIM_NOSCAN;

	/*
	 * Only run node reclaim on the local node or on nodes that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
		return NODE_RECLAIM_NOSCAN;

	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
		return NODE_RECLAIM_NOSCAN;

	ret = __node_reclaim(pgdat, gfp_mask, order);
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
	else
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif
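
/*
 * Usage note: check_move_unevictable_folios() below is intended for callers
 * that have just made a batch of folios evictable again (for example shmem
 * after SHM_UNLOCK). Such a caller typically gathers the affected folios
 * into a folio_batch and passes it here so they can be moved off the
 * unevictable LRU. The exact call sites may vary between kernel versions.
 */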

/**
 * check_move_unevictable_folios - Move evictable folios to appropriate zone
 * lru list
 * @fbatch: Batch of lru folios to check.
 *
 * Checks folios for evictability; if an evictable folio is on the unevictable
 * lru list, it is moved to the appropriate evictable lru list. This function
 * should only be used for lru folios.
 */
void check_move_unevictable_folios(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < fbatch->nr; i++) {
		struct folio *folio = fbatch->folios[i];
		int nr_pages = folio_nr_pages(folio);

		pgscanned += nr_pages;

		/* block memcg migration while the folio moves between lrus */
		if (!folio_test_clear_lru(folio))
			continue;

		lruvec = folio_lruvec_relock_irq(folio, lruvec);
		if (folio_evictable(folio) && folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);
			pgrescued += nr_pages;
		}
		folio_set_lru(folio);
	}

	if (lruvec) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		unlock_page_lruvec_irq(lruvec);
	} else if (pgscanned) {
		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
	}
}
EXPORT_SYMBOL_GPL(check_move_unevictable_folios);