1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
4 *
5 * Swap reorganised 29.12.95, Stephen Tweedie.
6 * kswapd added: 7.1.96 sct
7 * Removed kswapd_ctl limits, and swap out as many pages as needed
8 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
9 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
10 * Multiqueue VM started 5.8.00, Rik van Riel.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/mm.h>
16 #include <linux/sched/mm.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/swap.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/highmem.h>
24 #include <linux/vmpressure.h>
25 #include <linux/vmstat.h>
26 #include <linux/file.h>
27 #include <linux/writeback.h>
28 #include <linux/blkdev.h>
29 #include <linux/buffer_head.h> /* for buffer_heads_over_limit */
30 #include <linux/mm_inline.h>
31 #include <linux/backing-dev.h>
32 #include <linux/rmap.h>
33 #include <linux/topology.h>
34 #include <linux/cpu.h>
35 #include <linux/cpuset.h>
36 #include <linux/compaction.h>
37 #include <linux/notifier.h>
38 #include <linux/delay.h>
39 #include <linux/kthread.h>
40 #include <linux/freezer.h>
41 #include <linux/memcontrol.h>
42 #include <linux/migrate.h>
43 #include <linux/delayacct.h>
44 #include <linux/sysctl.h>
45 #include <linux/memory-tiers.h>
46 #include <linux/oom.h>
47 #include <linux/pagevec.h>
48 #include <linux/prefetch.h>
49 #include <linux/printk.h>
50 #include <linux/dax.h>
51 #include <linux/psi.h>
52 #include <linux/pagewalk.h>
53 #include <linux/shmem_fs.h>
54 #include <linux/ctype.h>
55 #include <linux/debugfs.h>
56 #include <linux/khugepaged.h>
57 #include <linux/rculist_nulls.h>
58 #include <linux/random.h>
59 #include <linux/mmu_notifier.h>
60
61 #include <asm/tlbflush.h>
62 #include <asm/div64.h>
63
64 #include <linux/swapops.h>
65 #include <linux/balloon_compaction.h>
66 #include <linux/sched/sysctl.h>
67
68 #include "internal.h"
69 #include "swap.h"
70
71 #define CREATE_TRACE_POINTS
72 #include <trace/events/vmscan.h>
73
74 struct scan_control {
75 /* How many pages shrink_list() should reclaim */
76 unsigned long nr_to_reclaim;
77
78 /*
79 * Nodemask of nodes allowed by the caller. If NULL, all nodes
80 * are scanned.
81 */
82 nodemask_t *nodemask;
83
84 /*
85 * The memory cgroup that hit its limit and as a result is the
86 * primary target of this reclaim invocation.
87 */
88 struct mem_cgroup *target_mem_cgroup;
89
90 /*
91 * Scan pressure balancing between anon and file LRUs
92 */
93 unsigned long anon_cost;
94 unsigned long file_cost;
95
96 #ifdef CONFIG_MEMCG
97 /* Swappiness value for proactive reclaim. Always use sc_swappiness()! */
98 int *proactive_swappiness;
99 #endif
100
101 /* Can active folios be deactivated as part of reclaim? */
102 #define DEACTIVATE_ANON 1
103 #define DEACTIVATE_FILE 2
104 unsigned int may_deactivate:2;
105 unsigned int force_deactivate:1;
106 unsigned int skipped_deactivate:1;
107
108 /* Writepage batching in laptop mode; RECLAIM_WRITE */
109 unsigned int may_writepage:1;
110
111 /* Can mapped folios be reclaimed? */
112 unsigned int may_unmap:1;
113
114 /* Can folios be swapped as part of reclaim? */
115 unsigned int may_swap:1;
116
117 /* Don't allow cache_trim_mode to be turned on as part of reclaim? */
118 unsigned int no_cache_trim_mode:1;
119
120 /* Has cache_trim_mode failed at least once? */
121 unsigned int cache_trim_mode_failed:1;
122
123 /* Proactive reclaim invoked by userspace through memory.reclaim */
124 unsigned int proactive:1;
125
126 /*
127 * Cgroup memory below memory.low is protected as long as we
128 * don't threaten to OOM. If any cgroup is reclaimed at
129 * reduced force or passed over entirely due to its memory.low
130 * setting (memcg_low_skipped), and nothing is reclaimed as a
131 * result, then go back for one more cycle that reclaims the protected
132 * memory (memcg_low_reclaim) to avert OOM.
133 */
134 unsigned int memcg_low_reclaim:1;
135 unsigned int memcg_low_skipped:1;
136
137 /* Shared cgroup tree walk failed, rescan the whole tree */
138 unsigned int memcg_full_walk:1;
139
140 unsigned int hibernation_mode:1;
141
142 /* One of the zones is ready for compaction */
143 unsigned int compaction_ready:1;
144
145 /* There is easily reclaimable cold cache in the current node */
146 unsigned int cache_trim_mode:1;
147
148 /* The file folios on the current node are dangerously low */
149 unsigned int file_is_tiny:1;
150
151 /* Always discard instead of demoting to lower tier memory */
152 unsigned int no_demotion:1;
153
154 /* Allocation order */
155 s8 order;
156
157 /* Scan (total_size >> priority) pages at once */
158 s8 priority;
159
160 /* The highest zone to isolate folios for reclaim from */
161 s8 reclaim_idx;
162
163 /* This context's GFP mask */
164 gfp_t gfp_mask;
165
166 /* Incremented by the number of inactive pages that were scanned */
167 unsigned long nr_scanned;
168
169 /* Number of pages freed so far during a call to shrink_zones() */
170 unsigned long nr_reclaimed;
171
172 struct {
173 unsigned int dirty;
174 unsigned int unqueued_dirty;
175 unsigned int congested;
176 unsigned int writeback;
177 unsigned int immediate;
178 unsigned int file_taken;
179 unsigned int taken;
180 } nr;
181
182 /* for recording the reclaimed slab by now */
183 struct reclaim_state reclaim_state;
184 };
185
186 #ifdef ARCH_HAS_PREFETCHW
187 #define prefetchw_prev_lru_folio(_folio, _base, _field) \
188 do { \
189 if ((_folio)->lru.prev != _base) { \
190 struct folio *prev; \
191 \
192 prev = lru_to_folio(&(_folio->lru)); \
193 prefetchw(&prev->_field); \
194 } \
195 } while (0)
196 #else
197 #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
198 #endif
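
/*
 * Usage sketch (hedged): in the LRU isolation loop further below, the macro
 * prefetches the flags word of the folio that will be examined next, e.g. as
 * isolate_lru_folios() does:
 *
 *	folio = lru_to_folio(src);
 *	prefetchw_prev_lru_folio(folio, src, flags);
 */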
199
200 /*
201 * From 0 .. MAX_SWAPPINESS. Higher means more swappy.
202 */
203 int vm_swappiness = 60;
204
205 #ifdef CONFIG_MEMCG
206
207 /* Returns true for reclaim through cgroup limits or cgroup interfaces. */
208 static bool cgroup_reclaim(struct scan_control *sc)
209 {
210 return sc->target_mem_cgroup;
211 }
212
213 /*
214 * Returns true for reclaim on the root cgroup. This is true for direct
215 * allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
216 */
217 static bool root_reclaim(struct scan_control *sc)
218 {
219 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
220 }
221
222 /**
223 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
224 * @sc: scan_control in question
225 *
226 * The normal page dirty throttling mechanism in balance_dirty_pages() is
227 * completely broken with the legacy memcg and direct stalling in
228 * shrink_folio_list() is used for throttling instead, which lacks all the
229 * niceties such as fairness, adaptive pausing, bandwidth proportional
230 * allocation and configurability.
231 *
232 * This function tests whether the vmscan currently in progress can assume
233 * that the normal dirty throttling mechanism is operational.
234 */
235 static bool writeback_throttling_sane(struct scan_control *sc)
236 {
237 if (!cgroup_reclaim(sc))
238 return true;
239 #ifdef CONFIG_CGROUP_WRITEBACK
240 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
241 return true;
242 #endif
243 return false;
244 }
245
246 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
247 {
248 if (sc->proactive && sc->proactive_swappiness)
249 return *sc->proactive_swappiness;
250 return mem_cgroup_swappiness(memcg);
251 }
252 #else
253 static bool cgroup_reclaim(struct scan_control *sc)
254 {
255 return false;
256 }
257
258 static bool root_reclaim(struct scan_control *sc)
259 {
260 return true;
261 }
262
263 static bool writeback_throttling_sane(struct scan_control *sc)
264 {
265 return true;
266 }
267
268 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
269 {
270 return READ_ONCE(vm_swappiness);
271 }
272 #endif
273
274 /* for_each_managed_zone_pgdat - helper macro to iterate over all managed zones in a pgdat up to
275 * and including the specified highidx
276 * @zone: The current zone in the iterator
277 * @pgdat: The pgdat which node_zones are being iterated
278 * @idx: The index variable
279 * @highidx: The index of the highest zone to return
280 *
281 * This macro iterates through all managed zones up to and including the specified highidx.
282 * The zone iterator enters an invalid state after macro call and must be reinitialized
283 * before it can be used again.
284 */
285 #define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \
286 for ((idx) = 0, (zone) = (pgdat)->node_zones; \
287 (idx) <= (highidx); \
288 (idx)++, (zone)++) \
289 if (!managed_zone(zone)) \
290 continue; \
291 else
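
/*
 * Usage sketch (hedged, mirroring skip_throttle_noprogress() below): walk
 * every managed zone of a node up to the reclaim ceiling and sum a per-zone
 * statistic:
 *
 *	struct zone *zone;
 *	int i;
 *	unsigned long reclaimable = 0;
 *
 *	for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1)
 *		reclaimable += zone_reclaimable_pages(zone);
 */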
292
293 static void set_task_reclaim_state(struct task_struct *task,
294 struct reclaim_state *rs)
295 {
296 /* Check for an overwrite */
297 WARN_ON_ONCE(rs && task->reclaim_state);
298
299 /* Check for the nulling of an already-nulled member */
300 WARN_ON_ONCE(!rs && !task->reclaim_state);
301
302 task->reclaim_state = rs;
303 }
304
305 /*
306 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
307 * scan_control->nr_reclaimed.
308 */
309 static void flush_reclaim_state(struct scan_control *sc)
310 {
311 /*
312 * Currently, reclaim_state->reclaimed includes three types of pages
313 * freed outside of vmscan:
314 * (1) Slab pages.
315 * (2) Clean file pages from pruned inodes (on highmem systems).
316 * (3) XFS freed buffer pages.
317 *
318 * For all of these cases, we cannot universally link the pages to a
319 * single memcg. For example, a memcg-aware shrinker can free one object
320 * charged to the target memcg, causing an entire page to be freed.
321 * If we count the entire page as reclaimed from the memcg, we end up
322 * overestimating the reclaimed amount (potentially under-reclaiming).
323 *
324 * Only count such pages for global reclaim to prevent under-reclaiming
325 * from the target memcg; preventing unnecessary retries during memcg
326 * charging and false positives from proactive reclaim.
327 *
328 * For uncommon cases where the freed pages were actually mostly
329 * charged to the target memcg, we end up underestimating the reclaimed
330 * amount. This should be fine. The freed pages will be uncharged
331 * anyway, even if they are not counted here properly, and we will be
332 * able to make forward progress in charging (which is usually in a
333 * retry loop).
334 *
335 * We can go one step further, and report the uncharged objcg pages in
336 * memcg reclaim, to make reporting more accurate and reduce
337 * underestimation, but it's probably not worth the complexity for now.
338 */
339 if (current->reclaim_state && root_reclaim(sc)) {
340 sc->nr_reclaimed += current->reclaim_state->reclaimed;
341 current->reclaim_state->reclaimed = 0;
342 }
343 }
344
345 static bool can_demote(int nid, struct scan_control *sc,
346 struct mem_cgroup *memcg)
347 {
348 int demotion_nid;
349
350 if (!numa_demotion_enabled)
351 return false;
352 if (sc && sc->no_demotion)
353 return false;
354
355 demotion_nid = next_demotion_node(nid);
356 if (demotion_nid == NUMA_NO_NODE)
357 return false;
358
359 /* If demotion node isn't in the cgroup's mems_allowed, fall back */
360 return mem_cgroup_node_allowed(memcg, demotion_nid);
361 }
362
363 static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
364 int nid,
365 struct scan_control *sc)
366 {
367 if (memcg == NULL) {
368 /*
369 * For non-memcg reclaim, is there
370 * space in any swap device?
371 */
372 if (get_nr_swap_pages() > 0)
373 return true;
374 } else {
375 /* Is the memcg below its swap limit? */
376 if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
377 return true;
378 }
379
380 /*
381 * The page cannot be swapped.
382 *
383 * Can it be reclaimed from this node via demotion?
384 */
385 return can_demote(nid, sc, memcg);
386 }
387
388 /*
389 * This misses isolated folios which are not accounted for to save counters.
390 * As the data only determines if reclaim or compaction continues, it is
391 * not expected that isolated folios will be a dominating factor.
392 */
393 unsigned long zone_reclaimable_pages(struct zone *zone)
394 {
395 unsigned long nr;
396
397 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
398 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
399 if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
400 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
401 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
402 /*
403 * If there are no reclaimable file-backed or anonymous pages,
404 * ensure zones with sufficient free pages are not skipped.
405 * This prevents zones like DMA32 from being ignored in reclaim
406 * scenarios where they can still help alleviate memory pressure.
407 */
408 if (nr == 0)
409 nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
410 return nr;
411 }
412
413 /**
414 * lruvec_lru_size - Returns the number of pages on the given LRU list.
415 * @lruvec: lru vector
416 * @lru: lru to use
417 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
418 */
419 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
420 int zone_idx)
421 {
422 unsigned long size = 0;
423 int zid;
424 struct zone *zone;
425
426 for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) {
427 if (!mem_cgroup_disabled())
428 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
429 else
430 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
431 }
432 return size;
433 }
434
435 static unsigned long drop_slab_node(int nid)
436 {
437 unsigned long freed = 0;
438 struct mem_cgroup *memcg = NULL;
439
440 memcg = mem_cgroup_iter(NULL, NULL, NULL);
441 do {
442 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
443 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
444
445 return freed;
446 }
447
448 void drop_slab(void)
449 {
450 int nid;
451 int shift = 0;
452 unsigned long freed;
453
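/*
 * Repeat node-wide shrink passes until a pass frees too little: the freed
 * count is right-shifted by one more bit each pass, so each successive pass
 * must free roughly twice as much as the last one for the loop to continue.
 */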
454 do {
455 freed = 0;
456 for_each_online_node(nid) {
457 if (fatal_signal_pending(current))
458 return;
459
460 freed += drop_slab_node(nid);
461 }
462 } while ((freed >> shift++) > 1);
463 }
464
465 #define CHECK_RECLAIMER_OFFSET(type) \
466 do { \
467 BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
468 PGDEMOTE_##type - PGDEMOTE_KSWAPD); \
469 BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
470 PGSCAN_##type - PGSCAN_KSWAPD); \
471 } while (0)
472
473 static int reclaimer_offset(struct scan_control *sc)
474 {
475 CHECK_RECLAIMER_OFFSET(DIRECT);
476 CHECK_RECLAIMER_OFFSET(KHUGEPAGED);
477 CHECK_RECLAIMER_OFFSET(PROACTIVE);
478
479 if (current_is_kswapd())
480 return 0;
481 if (current_is_khugepaged())
482 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
483 if (sc->proactive)
484 return PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD;
485 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
486 }
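
/*
 * Hedged usage note: the offset returned above is added to the
 * PGSTEAL_KSWAPD / PGSCAN_KSWAPD / PGDEMOTE_KSWAPD base counters when
 * accounting reclaim activity, e.g. (a sketch, not a quote from this file):
 *
 *	__count_vm_events(PGSCAN_KSWAPD + reclaimer_offset(sc), nr_scanned);
 *
 * The BUILD_BUG_ONs above guarantee that the same offset is valid for all
 * three counter families.
 */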
487
488 static inline int is_page_cache_freeable(struct folio *folio)
489 {
490 /*
491 * A freeable page cache folio is referenced only by the caller
492 * that isolated the folio, the page cache and optional filesystem
493 * private data at folio->private.
494 */
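/*
 * Worked example (a sketch): a 4-page folio still in the page cache with
 * buffer heads attached holds 4 page cache references, 1 reference from the
 * isolating caller and 1 for the private data, so folio_ref_count() == 6
 * and 6 - 1 == 1 + 4 makes it freeable.
 */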
495 return folio_ref_count(folio) - folio_test_private(folio) ==
496 1 + folio_nr_pages(folio);
497 }
498
499 /*
500 * We detected a synchronous write error writing a folio out. Probably
501 * -ENOSPC. We need to propagate that into the address_space for a subsequent
502 * fsync(), msync() or close().
503 *
504 * The tricky part is that after writepage we cannot touch the mapping: nothing
505 * prevents it from being freed up. But we have a ref on the folio and once
506 * that folio is locked, the mapping is pinned.
507 *
508 * We're allowed to run sleeping folio_lock() here because we know the caller has
509 * __GFP_FS.
510 */
511 static void handle_write_error(struct address_space *mapping,
512 struct folio *folio, int error)
513 {
514 folio_lock(folio);
515 if (folio_mapping(folio) == mapping)
516 mapping_set_error(mapping, error);
517 folio_unlock(folio);
518 }
519
520 static bool skip_throttle_noprogress(pg_data_t *pgdat)
521 {
522 int reclaimable = 0, write_pending = 0;
523 int i;
524 struct zone *zone;
525 /*
526 * If kswapd is disabled, reschedule if necessary but do not
527 * throttle as the system is likely near OOM.
528 */
529 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
530 return true;
531
532 /*
533 * If there are a lot of dirty/writeback folios then do not
534 * throttle as throttling will occur when the folios cycle
535 * towards the end of the LRU if still under writeback.
536 */
537 for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) {
538 reclaimable += zone_reclaimable_pages(zone);
539 write_pending += zone_page_state_snapshot(zone,
540 NR_ZONE_WRITE_PENDING);
541 }
542 if (2 * write_pending <= reclaimable)
543 return true;
544
545 return false;
546 }
547
548 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
549 {
550 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
551 long timeout, ret;
552 DEFINE_WAIT(wait);
553
554 /*
555 * Do not throttle user workers, kthreads other than kswapd or
556 * workqueues. They may be required for reclaim to make
557 * forward progress (e.g. journalling workqueues or kthreads).
558 */
559 if (!current_is_kswapd() &&
560 current->flags & (PF_USER_WORKER|PF_KTHREAD)) {
561 cond_resched();
562 return;
563 }
564
565 /*
566 * These figures are pulled out of thin air.
567 * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many
568 * parallel reclaimers which is a short-lived event so the timeout is
569 * short. Failing to make progress or waiting on writeback are
570 * potentially long-lived events so use a longer timeout. This is shaky
571 * logic as a failure to make progress could be due to anything from
572 * writeback to a slow device to excessive referenced folios at the tail
573 * of the inactive LRU.
574 */
575 switch(reason) {
576 case VMSCAN_THROTTLE_WRITEBACK:
577 timeout = HZ/10;
578
579 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
580 WRITE_ONCE(pgdat->nr_reclaim_start,
581 node_page_state(pgdat, NR_THROTTLED_WRITTEN));
582 }
583
584 break;
585 case VMSCAN_THROTTLE_CONGESTED:
586 fallthrough;
587 case VMSCAN_THROTTLE_NOPROGRESS:
588 if (skip_throttle_noprogress(pgdat)) {
589 cond_resched();
590 return;
591 }
592
593 timeout = 1;
594
595 break;
596 case VMSCAN_THROTTLE_ISOLATED:
597 timeout = HZ/50;
598 break;
599 default:
600 WARN_ON_ONCE(1);
601 timeout = HZ;
602 break;
603 }
604
605 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
606 ret = schedule_timeout(timeout);
607 finish_wait(wqh, &wait);
608
609 if (reason == VMSCAN_THROTTLE_WRITEBACK)
610 atomic_dec(&pgdat->nr_writeback_throttled);
611
612 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
613 jiffies_to_usecs(timeout - ret),
614 reason);
615 }
616
617 /*
618 * Account for folios written if tasks are throttled waiting on dirty
619 * folios to clean. If enough folios have been cleaned since throttling
620 * started then wakeup the throttled tasks.
621 */
622 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
623 int nr_throttled)
624 {
625 unsigned long nr_written;
626
627 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
628
629 /*
630 * This is an inaccurate read as the per-cpu deltas may not
631 * be synchronised. However, given that the system is
632 * writeback throttled, it is not worth taking the penalty
633 * of getting an accurate count. At worst, the throttle
634 * timeout guarantees forward progress.
635 */
636 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
637 READ_ONCE(pgdat->nr_reclaim_start);
638
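/*
 * Wake the waiters once enough progress has been made, e.g. (a worked
 * sketch) with two throttled tasks, wake them after 2 * SWAP_CLUSTER_MAX
 * pages have been written back since pgdat->nr_reclaim_start was recorded.
 */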
639 if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
640 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
641 }
642
643 /* possible outcome of pageout() */
644 typedef enum {
645 /* failed to write folio out, folio is locked */
646 PAGE_KEEP,
647 /* move folio to the active list, folio is locked */
648 PAGE_ACTIVATE,
649 /* folio has been sent to the disk successfully, folio is unlocked */
650 PAGE_SUCCESS,
651 /* folio is clean and locked */
652 PAGE_CLEAN,
653 } pageout_t;
654
655 /*
656 * pageout is called by shrink_folio_list() for each dirty folio.
657 */
658 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
659 struct swap_iocb **plug, struct list_head *folio_list)
660 {
661 int (*writeout)(struct folio *, struct writeback_control *);
662
663 /*
664 * We no longer attempt to writeback filesystem folios here, other
665 * than tmpfs/shmem. That's taken care of in page-writeback.
666 * If we find a dirty filesystem folio at the end of the LRU list,
667 * typically that means the filesystem is saturating the storage
668 * with contiguous writes and telling it to write a folio here
669 * would only make the situation worse by injecting an element
670 * of random access.
671 *
672 * If the folio is swapcache, write it back even if that would
673 * block, for some throttling. This happens by accident, because
674 * swap_backing_dev_info is bust: it doesn't reflect the
675 * congestion state of the swapdevs. Easy to fix, if needed.
676 */
677 if (!is_page_cache_freeable(folio))
678 return PAGE_KEEP;
679 if (!mapping) {
680 /*
681 * Some data journaling orphaned folios can have
682 * folio->mapping == NULL while being dirty with clean buffers.
683 */
684 if (folio_test_private(folio)) {
685 if (try_to_free_buffers(folio)) {
686 folio_clear_dirty(folio);
687 pr_info("%s: orphaned folio\n", __func__);
688 return PAGE_CLEAN;
689 }
690 }
691 return PAGE_KEEP;
692 }
693 if (shmem_mapping(mapping))
694 writeout = shmem_writeout;
695 else if (folio_test_anon(folio))
696 writeout = swap_writeout;
697 else
698 return PAGE_ACTIVATE;
699
700 if (folio_clear_dirty_for_io(folio)) {
701 int res;
702 struct writeback_control wbc = {
703 .sync_mode = WB_SYNC_NONE,
704 .nr_to_write = SWAP_CLUSTER_MAX,
705 .range_start = 0,
706 .range_end = LLONG_MAX,
707 .for_reclaim = 1,
708 .swap_plug = plug,
709 };
710
711 /*
712 * The large shmem folio can be split if CONFIG_THP_SWAP is
713 * not enabled or if contiguous swap entries cannot be
714 * allocated.
715 */
716 if (shmem_mapping(mapping) && folio_test_large(folio))
717 wbc.list = folio_list;
718
719 folio_set_reclaim(folio);
720 res = writeout(folio, &wbc);
721 if (res < 0)
722 handle_write_error(mapping, folio, res);
723 if (res == AOP_WRITEPAGE_ACTIVATE) {
724 folio_clear_reclaim(folio);
725 return PAGE_ACTIVATE;
726 }
727
728 if (!folio_test_writeback(folio)) {
729 /* synchronous write? */
730 folio_clear_reclaim(folio);
731 }
732 trace_mm_vmscan_write_folio(folio);
733 node_stat_add_folio(folio, NR_VMSCAN_WRITE);
734 return PAGE_SUCCESS;
735 }
736
737 return PAGE_CLEAN;
738 }
739
740 /*
741 * Same as remove_mapping, but if the folio is removed from the mapping, it
742 * gets returned with a refcount of 0.
743 */
744 static int __remove_mapping(struct address_space *mapping, struct folio *folio,
745 bool reclaimed, struct mem_cgroup *target_memcg)
746 {
747 int refcount;
748 void *shadow = NULL;
749
750 BUG_ON(!folio_test_locked(folio));
751 BUG_ON(mapping != folio_mapping(folio));
752
753 if (!folio_test_swapcache(folio))
754 spin_lock(&mapping->host->i_lock);
755 xa_lock_irq(&mapping->i_pages);
756 /*
757 * The non racy check for a busy folio.
758 *
759 * Must be careful with the order of the tests. When someone has
760 * a ref to the folio, it may be possible that they dirty it then
761 * drop the reference. So if the dirty flag is tested before the
762 * refcount here, then the following race may occur:
763 *
764 * get_user_pages(&page);
765 * [user mapping goes away]
766 * write_to(page);
767 * !folio_test_dirty(folio) [good]
768 * folio_set_dirty(folio);
769 * folio_put(folio);
770 * !refcount(folio) [good, discard it]
771 *
772 * [oops, our write_to data is lost]
773 *
774 * Reversing the order of the tests ensures such a situation cannot
775 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
776 * load is not satisfied before that of folio->_refcount.
777 *
778 * Note that if the dirty flag is always set via folio_mark_dirty,
779 * and thus under the i_pages lock, then this ordering is not required.
780 */
781 refcount = 1 + folio_nr_pages(folio);
782 if (!folio_ref_freeze(folio, refcount))
783 goto cannot_free;
784 /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */
785 if (unlikely(folio_test_dirty(folio))) {
786 folio_ref_unfreeze(folio, refcount);
787 goto cannot_free;
788 }
789
790 if (folio_test_swapcache(folio)) {
791 swp_entry_t swap = folio->swap;
792
793 if (reclaimed && !mapping_exiting(mapping))
794 shadow = workingset_eviction(folio, target_memcg);
795 __delete_from_swap_cache(folio, swap, shadow);
796 memcg1_swapout(folio, swap);
797 xa_unlock_irq(&mapping->i_pages);
798 put_swap_folio(folio, swap);
799 } else {
800 void (*free_folio)(struct folio *);
801
802 free_folio = mapping->a_ops->free_folio;
803 /*
804 * Remember a shadow entry for reclaimed file cache in
805 * order to detect refaults, thus thrashing, later on.
806 *
807 * But don't store shadows in an address space that is
808 * already exiting. This is not just an optimization,
809 * inode reclaim needs to empty out the radix tree or
810 * the nodes are lost. Don't plant shadows behind its
811 * back.
812 *
813 * We also don't store shadows for DAX mappings because the
814 * only page cache folios found in these are zero pages
815 * covering holes, and because we don't want to mix DAX
816 * exceptional entries and shadow exceptional entries in the
817 * same address_space.
818 */
819 if (reclaimed && folio_is_file_lru(folio) &&
820 !mapping_exiting(mapping) && !dax_mapping(mapping))
821 shadow = workingset_eviction(folio, target_memcg);
822 __filemap_remove_folio(folio, shadow);
823 xa_unlock_irq(&mapping->i_pages);
824 if (mapping_shrinkable(mapping))
825 inode_add_lru(mapping->host);
826 spin_unlock(&mapping->host->i_lock);
827
828 if (free_folio)
829 free_folio(folio);
830 }
831
832 return 1;
833
834 cannot_free:
835 xa_unlock_irq(&mapping->i_pages);
836 if (!folio_test_swapcache(folio))
837 spin_unlock(&mapping->host->i_lock);
838 return 0;
839 }
840
841 /**
842 * remove_mapping() - Attempt to remove a folio from its mapping.
843 * @mapping: The address space.
844 * @folio: The folio to remove.
845 *
846 * If the folio is dirty, under writeback or if someone else has a ref
847 * on it, removal will fail.
848 * Return: The number of pages removed from the mapping. 0 if the folio
849 * could not be removed.
850 * Context: The caller should have a single refcount on the folio and
851 * hold its lock.
852 */
853 long remove_mapping(struct address_space *mapping, struct folio *folio)
854 {
855 if (__remove_mapping(mapping, folio, false, NULL)) {
856 /*
857 * Unfreezing the refcount with 1 effectively
858 * drops the pagecache ref for us without requiring another
859 * atomic operation.
860 */
861 folio_ref_unfreeze(folio, 1);
862 return folio_nr_pages(folio);
863 }
864 return 0;
865 }
866
867 /**
868 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
869 * @folio: Folio to be returned to an LRU list.
870 *
871 * Add previously isolated @folio to appropriate LRU list.
872 * The folio may still be unevictable for other reasons.
873 *
874 * Context: lru_lock must not be held, interrupts must be enabled.
875 */
876 void folio_putback_lru(struct folio *folio)
877 {
878 folio_add_lru(folio);
879 folio_put(folio); /* drop ref from isolate */
880 }
881
882 enum folio_references {
883 FOLIOREF_RECLAIM,
884 FOLIOREF_RECLAIM_CLEAN,
885 FOLIOREF_KEEP,
886 FOLIOREF_ACTIVATE,
887 };
888
889 #ifdef CONFIG_LRU_GEN
890 /*
891 * Only used on a mapped folio in the eviction (rmap walk) path, where promotion
892 * needs to be done by taking the folio off the LRU list and then adding it back
893 * with PG_active set. In contrast, the aging (page table walk) path uses
894 * folio_update_gen().
895 */
896 static bool lru_gen_set_refs(struct folio *folio)
897 {
898 /* see the comment on LRU_REFS_FLAGS */
899 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
900 set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
901 return false;
902 }
903
904 set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset));
905 return true;
906 }
907 #else
908 static bool lru_gen_set_refs(struct folio *folio)
909 {
910 return false;
911 }
912 #endif /* CONFIG_LRU_GEN */
913
914 static enum folio_references folio_check_references(struct folio *folio,
915 struct scan_control *sc)
916 {
917 int referenced_ptes, referenced_folio;
918 unsigned long vm_flags;
919
920 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
921 &vm_flags);
922
923 /*
924 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
925 * Let the folio, now marked Mlocked, be moved to the unevictable list.
926 */
927 if (vm_flags & VM_LOCKED)
928 return FOLIOREF_ACTIVATE;
929
930 /*
931 * There are two cases to consider.
932 * 1) Rmap lock contention: rotate.
933 * 2) Skip the non-shared swapbacked folio mapped solely by
934 * the exiting or OOM-reaped process.
935 */
936 if (referenced_ptes == -1)
937 return FOLIOREF_KEEP;
938
939 if (lru_gen_enabled()) {
940 if (!referenced_ptes)
941 return FOLIOREF_RECLAIM;
942
943 return lru_gen_set_refs(folio) ? FOLIOREF_ACTIVATE : FOLIOREF_KEEP;
944 }
945
946 referenced_folio = folio_test_clear_referenced(folio);
947
948 if (referenced_ptes) {
949 /*
950 * All mapped folios start out with page table
951 * references from the instantiating fault, so we need
952 * to look twice if a mapped file/anon folio is used more
953 * than once.
954 *
955 * Mark it and spare it for another trip around the
956 * inactive list. Another page table reference will
957 * lead to its activation.
958 *
959 * Note: the mark is set for activated folios as well
960 * so that recently deactivated but used folios are
961 * quickly recovered.
962 */
963 folio_set_referenced(folio);
964
965 if (referenced_folio || referenced_ptes > 1)
966 return FOLIOREF_ACTIVATE;
967
968 /*
969 * Activate file-backed executable folios after first usage.
970 */
971 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
972 return FOLIOREF_ACTIVATE;
973
974 return FOLIOREF_KEEP;
975 }
976
977 /* Reclaim if clean, defer dirty folios to writeback */
978 if (referenced_folio && folio_is_file_lru(folio))
979 return FOLIOREF_RECLAIM_CLEAN;
980
981 return FOLIOREF_RECLAIM;
982 }
983
984 /* Check if a folio is dirty or under writeback */
985 static void folio_check_dirty_writeback(struct folio *folio,
986 bool *dirty, bool *writeback)
987 {
988 struct address_space *mapping;
989
990 /*
991 * Anonymous folios are not handled by flushers and must be written
992 * from reclaim context. Do not stall reclaim based on them.
993 * MADV_FREE anonymous folios are put on the inactive file list too.
994 * They could be mistakenly treated as being on the file LRU, so a
995 * further anon test is needed.
996 */
997 if (!folio_is_file_lru(folio) ||
998 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
999 *dirty = false;
1000 *writeback = false;
1001 return;
1002 }
1003
1004 /* By default assume that the folio flags are accurate */
1005 *dirty = folio_test_dirty(folio);
1006 *writeback = folio_test_writeback(folio);
1007
1008 /* Verify dirty/writeback state if the filesystem supports it */
1009 if (!folio_test_private(folio))
1010 return;
1011
1012 mapping = folio_mapping(folio);
1013 if (mapping && mapping->a_ops->is_dirty_writeback)
1014 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
1015 }
1016
1017 struct folio *alloc_migrate_folio(struct folio *src, unsigned long private)
1018 {
1019 struct folio *dst;
1020 nodemask_t *allowed_mask;
1021 struct migration_target_control *mtc;
1022
1023 mtc = (struct migration_target_control *)private;
1024
1025 allowed_mask = mtc->nmask;
1026 /*
1027 * Make sure we allocate from the target node first, also trying to
1028 * demote or reclaim pages from the target node via kswapd if we are
1029 * low on free memory on the target node. If we don't do this and
1030 * free memory is available on a slower (lower) memory tier, we would
1031 * start allocating pages from that tier without even forcing a
1032 * demotion of cold pages from the target memtier. This can result
1033 * in the kernel placing hot pages in slower (lower) memory tiers.
1034 */
1035 mtc->nmask = NULL;
1036 mtc->gfp_mask |= __GFP_THISNODE;
1037 dst = alloc_migration_target(src, (unsigned long)mtc);
1038 if (dst)
1039 return dst;
1040
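	/*
	 * The target-node attempt failed: drop __GFP_THISNODE and retry
	 * with the caller's full allowed nodemask restored.
	 */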
1041 mtc->gfp_mask &= ~__GFP_THISNODE;
1042 mtc->nmask = allowed_mask;
1043
1044 return alloc_migration_target(src, (unsigned long)mtc);
1045 }
1046
1047 /*
1048 * Take folios on @demote_folios and attempt to demote them to another node.
1049 * Folios which are not demoted are left on @demote_folios.
1050 */
1051 static unsigned int demote_folio_list(struct list_head *demote_folios,
1052 struct pglist_data *pgdat)
1053 {
1054 int target_nid = next_demotion_node(pgdat->node_id);
1055 unsigned int nr_succeeded;
1056 nodemask_t allowed_mask;
1057
1058 struct migration_target_control mtc = {
1059 /*
1060 * Allocate from 'node', or fail quickly and quietly.
1061 * When this happens, 'page' will likely just be discarded
1062 * instead of migrated.
1063 */
1064 .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
1065 __GFP_NOMEMALLOC | GFP_NOWAIT,
1066 .nid = target_nid,
1067 .nmask = &allowed_mask,
1068 .reason = MR_DEMOTION,
1069 };
1070
1071 if (list_empty(demote_folios))
1072 return 0;
1073
1074 if (target_nid == NUMA_NO_NODE)
1075 return 0;
1076
1077 node_get_allowed_targets(pgdat, &allowed_mask);
1078
1079 /* Demotion ignores all cpuset and mempolicy settings */
1080 migrate_pages(demote_folios, alloc_migrate_folio, NULL,
1081 (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
1082 &nr_succeeded);
1083
1084 return nr_succeeded;
1085 }
1086
1087 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
1088 {
1089 if (gfp_mask & __GFP_FS)
1090 return true;
1091 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
1092 return false;
1093 /*
1094 * We can "enter_fs" for swap-cache with only __GFP_IO
1095 * providing this isn't SWP_FS_OPS.
1096 * ->flags can be updated non-atomicially (scan_swap_map_slots),
1097 * but that will never affect SWP_FS_OPS, so the data_race
1098 * is safe.
1099 */
1100 return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
1101 }
1102
1103 /*
1104 * shrink_folio_list() returns the number of reclaimed pages
1105 */
1106 static unsigned int shrink_folio_list(struct list_head *folio_list,
1107 struct pglist_data *pgdat, struct scan_control *sc,
1108 struct reclaim_stat *stat, bool ignore_references,
1109 struct mem_cgroup *memcg)
1110 {
1111 struct folio_batch free_folios;
1112 LIST_HEAD(ret_folios);
1113 LIST_HEAD(demote_folios);
1114 unsigned int nr_reclaimed = 0, nr_demoted = 0;
1115 unsigned int pgactivate = 0;
1116 bool do_demote_pass;
1117 struct swap_iocb *plug = NULL;
1118
1119 folio_batch_init(&free_folios);
1120 memset(stat, 0, sizeof(*stat));
1121 cond_resched();
1122 do_demote_pass = can_demote(pgdat->node_id, sc, memcg);
1123
1124 retry:
1125 while (!list_empty(folio_list)) {
1126 struct address_space *mapping;
1127 struct folio *folio;
1128 enum folio_references references = FOLIOREF_RECLAIM;
1129 bool dirty, writeback;
1130 unsigned int nr_pages;
1131
1132 cond_resched();
1133
1134 folio = lru_to_folio(folio_list);
1135 list_del(&folio->lru);
1136
1137 if (!folio_trylock(folio))
1138 goto keep;
1139
1140 if (folio_contain_hwpoisoned_page(folio)) {
1141 /*
1142 * unmap_poisoned_folio() can't handle large
1143 * folio, just skip it. memory_failure() will
1144 * handle it if the UCE is triggered again.
1145 */
1146 if (folio_test_large(folio))
1147 goto keep_locked;
1148
1149 unmap_poisoned_folio(folio, folio_pfn(folio), false);
1150 folio_unlock(folio);
1151 folio_put(folio);
1152 continue;
1153 }
1154
1155 VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
1156
1157 nr_pages = folio_nr_pages(folio);
1158
1159 /* Account the number of base pages */
1160 sc->nr_scanned += nr_pages;
1161
1162 if (unlikely(!folio_evictable(folio)))
1163 goto activate_locked;
1164
1165 if (!sc->may_unmap && folio_mapped(folio))
1166 goto keep_locked;
1167
1168 /*
1169 * The number of dirty pages determines if a node is marked
1170 * reclaim_congested. kswapd will stall and start writing
1171 * folios if the tail of the LRU is all dirty unqueued folios.
1172 */
1173 folio_check_dirty_writeback(folio, &dirty, &writeback);
1174 if (dirty || writeback)
1175 stat->nr_dirty += nr_pages;
1176
1177 if (dirty && !writeback)
1178 stat->nr_unqueued_dirty += nr_pages;
1179
1180 /*
1181 * Treat this folio as congested if folios are cycling
1182 * through the LRU so quickly that the folios marked
1183 * for immediate reclaim are making it to the end of
1184 * the LRU a second time.
1185 */
1186 if (writeback && folio_test_reclaim(folio))
1187 stat->nr_congested += nr_pages;
1188
1189 /*
1190 * If a folio at the tail of the LRU is under writeback, there
1191 * are three cases to consider.
1192 *
1193 * 1) If reclaim is encountering an excessive number
1194 * of folios under writeback and this folio has both
1195 * the writeback and reclaim flags set, then it
1196 * indicates that folios are being queued for I/O but
1197 * are being recycled through the LRU before the I/O
1198 * can complete. Waiting on the folio itself risks an
1199 * indefinite stall if it is impossible to writeback
1200 * the folio due to I/O error or disconnected storage
1201 * so instead note that the LRU is being scanned too
1202 * quickly and the caller can stall after the folio
1203 * list has been processed.
1204 *
1205 * 2) Global or new memcg reclaim encounters a folio that is
1206 * not marked for immediate reclaim, or the caller does not
1207 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
1208 * not to fs), or the folio belongs to a mapping where
1209 * waiting on writeback during reclaim may lead to a deadlock.
1210 * In this case mark the folio for immediate reclaim and
1211 * continue scanning.
1212 *
1213 * Require may_enter_fs() because we would wait on fs, which
1214 * may not have submitted I/O yet. And the loop driver might
1215 * enter reclaim, and deadlock if it waits on a folio for
1216 * which it is needed to do the write (loop masks off
1217 * __GFP_IO|__GFP_FS for this reason); but more thought
1218 * would probably show more reasons.
1219 *
1220 * 3) Legacy memcg encounters a folio that already has the
1221 * reclaim flag set. memcg does not have any dirty folio
1222 * throttling so we could easily OOM just because too many
1223 * folios are in writeback and there is nothing else to
1224 * reclaim. Wait for the writeback to complete.
1225 *
1226 * In cases 1) and 2) we activate the folios to get them out of
1227 * the way while we continue scanning for clean folios on the
1228 * inactive list and refilling from the active list. The
1229 * observation here is that waiting for disk writes is more
1230 * expensive than potentially causing reloads down the line.
1231 * Since they're marked for immediate reclaim, they won't put
1232 * memory pressure on the cache working set any longer than it
1233 * takes to write them to disk.
1234 */
1235 if (folio_test_writeback(folio)) {
1236 mapping = folio_mapping(folio);
1237
1238 /* Case 1 above */
1239 if (current_is_kswapd() &&
1240 folio_test_reclaim(folio) &&
1241 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1242 stat->nr_immediate += nr_pages;
1243 goto activate_locked;
1244
1245 /* Case 2 above */
1246 } else if (writeback_throttling_sane(sc) ||
1247 !folio_test_reclaim(folio) ||
1248 !may_enter_fs(folio, sc->gfp_mask) ||
1249 (mapping &&
1250 mapping_writeback_may_deadlock_on_reclaim(mapping))) {
1251 /*
1252 * This is slightly racy -
1253 * folio_end_writeback() might have
1254 * just cleared the reclaim flag, then
1255 * setting the reclaim flag here ends up
1256 * interpreted as the readahead flag - but
1257 * that does not matter enough to care.
1258 * What we do want is for this folio to
1259 * have the reclaim flag set next time
1260 * memcg reclaim reaches the tests above,
1261 * so it will then wait for writeback to
1262 * avoid OOM; and it's also appropriate
1263 * in global reclaim.
1264 */
1265 folio_set_reclaim(folio);
1266 stat->nr_writeback += nr_pages;
1267 goto activate_locked;
1268
1269 /* Case 3 above */
1270 } else {
1271 folio_unlock(folio);
1272 folio_wait_writeback(folio);
1273 /* then go back and try same folio again */
1274 list_add_tail(&folio->lru, folio_list);
1275 continue;
1276 }
1277 }
1278
1279 if (!ignore_references)
1280 references = folio_check_references(folio, sc);
1281
1282 switch (references) {
1283 case FOLIOREF_ACTIVATE:
1284 goto activate_locked;
1285 case FOLIOREF_KEEP:
1286 stat->nr_ref_keep += nr_pages;
1287 goto keep_locked;
1288 case FOLIOREF_RECLAIM:
1289 case FOLIOREF_RECLAIM_CLEAN:
1290 ; /* try to reclaim the folio below */
1291 }
1292
1293 /*
1294 * Before reclaiming the folio, try to relocate
1295 * its contents to another node.
1296 */
1297 if (do_demote_pass &&
1298 (thp_migration_supported() || !folio_test_large(folio))) {
1299 list_add(&folio->lru, &demote_folios);
1300 folio_unlock(folio);
1301 continue;
1302 }
1303
1304 /*
1305 * Anonymous process memory has backing store?
1306 * Try to allocate it some swap space here.
1307 * Lazyfree folio could be freed directly
1308 */
1309 if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
1310 if (!folio_test_swapcache(folio)) {
1311 if (!(sc->gfp_mask & __GFP_IO))
1312 goto keep_locked;
1313 if (folio_maybe_dma_pinned(folio))
1314 goto keep_locked;
1315 if (folio_test_large(folio)) {
1316 /* cannot split folio, skip it */
1317 if (!can_split_folio(folio, 1, NULL))
1318 goto activate_locked;
1319 /*
1320 * Split partially mapped folios right away.
1321 * We can free the unmapped pages without IO.
1322 */
1323 if (data_race(!list_empty(&folio->_deferred_list) &&
1324 folio_test_partially_mapped(folio)) &&
1325 split_folio_to_list(folio, folio_list))
1326 goto activate_locked;
1327 }
1328 if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
1329 int __maybe_unused order = folio_order(folio);
1330
1331 if (!folio_test_large(folio))
1332 goto activate_locked_split;
1333 /* Fallback to swap normal pages */
1334 if (split_folio_to_list(folio, folio_list))
1335 goto activate_locked;
1336 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1337 if (nr_pages >= HPAGE_PMD_NR) {
1338 count_memcg_folio_events(folio,
1339 THP_SWPOUT_FALLBACK, 1);
1340 count_vm_event(THP_SWPOUT_FALLBACK);
1341 }
1342 #endif
1343 count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
1344 if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
1345 goto activate_locked_split;
1346 }
1347 /*
1348 * Normally the folio will be dirtied in unmap because its
1349 * pte should be dirty. A special case is an MADV_FREE folio: its
1350 * pte could have the dirty bit cleared while the folio's
1351 * SwapBacked flag is still set, because clearing the dirty bit
1352 * and the SwapBacked flag is not done under a lock. For such a
1353 * folio, unmap will not set the dirty bit, so folio reclaim will
1354 * not write the folio out. This can cause data corruption when
1355 * the folio is swapped in later. Always setting the dirty flag
1356 * for the folio solves the problem.
1357 */
1358 folio_mark_dirty(folio);
1359 }
1360 }
1361
1362 /*
1363 * If the folio was split above, the tail pages will make
1364 * their own pass through this function and be accounted
1365 * then.
1366 */
1367 if ((nr_pages > 1) && !folio_test_large(folio)) {
1368 sc->nr_scanned -= (nr_pages - 1);
1369 nr_pages = 1;
1370 }
1371
1372 /*
1373 * The folio is mapped into the page tables of one or more
1374 * processes. Try to unmap it here.
1375 */
1376 if (folio_mapped(folio)) {
1377 enum ttu_flags flags = TTU_BATCH_FLUSH;
1378 bool was_swapbacked = folio_test_swapbacked(folio);
1379
1380 if (folio_test_pmd_mappable(folio))
1381 flags |= TTU_SPLIT_HUGE_PMD;
1382 /*
1383 * Without TTU_SYNC, try_to_unmap will only begin to
1384 * hold PTL from the first present PTE within a large
1385 * folio. Some initial PTEs might be skipped due to
1386 * races with parallel PTE writes in which PTEs can be
1387 * cleared temporarily before new present values are
1388 * written. This can leave a large folio still mapped
1389 * while some of its subpages have already been
1390 * unmapped by try_to_unmap; TTU_SYNC makes
1391 * try_to_unmap acquire the PTL from the first PTE,
1392 * eliminating the influence of temporary PTE values.
1393 */
1394 if (folio_test_large(folio))
1395 flags |= TTU_SYNC;
1396
1397 try_to_unmap(folio, flags);
1398 if (folio_mapped(folio)) {
1399 stat->nr_unmap_fail += nr_pages;
1400 if (!was_swapbacked &&
1401 folio_test_swapbacked(folio))
1402 stat->nr_lazyfree_fail += nr_pages;
1403 goto activate_locked;
1404 }
1405 }
1406
1407 /*
1408 * Folio is unmapped now so it cannot be newly pinned anymore.
1409 * No point in trying to reclaim folio if it is pinned.
1410 * Furthermore we don't want to reclaim underlying fs metadata
1411 * if the folio is pinned and thus potentially modified by the
1412 * pinning process as that may upset the filesystem.
1413 */
1414 if (folio_maybe_dma_pinned(folio))
1415 goto activate_locked;
1416
1417 mapping = folio_mapping(folio);
1418 if (folio_test_dirty(folio)) {
1419 /*
1420 * Only kswapd can writeback filesystem folios
1421 * to avoid risk of stack overflow. But avoid
1422 * injecting inefficient single-folio I/O into
1423 * flusher writeback as much as possible: only
1424 * write folios when we've encountered many
1425 * dirty folios, and when we've already scanned
1426 * the rest of the LRU for clean folios and see
1427 * the same dirty folios again (with the reclaim
1428 * flag set).
1429 */
1430 if (folio_is_file_lru(folio) &&
1431 (!current_is_kswapd() ||
1432 !folio_test_reclaim(folio) ||
1433 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1434 /*
1435 * Immediately reclaim when written back.
1436 * Similar in principle to folio_deactivate()
1437 * except we already have the folio isolated
1438 * and know it's dirty
1439 */
1440 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
1441 nr_pages);
1442 folio_set_reclaim(folio);
1443
1444 goto activate_locked;
1445 }
1446
1447 if (references == FOLIOREF_RECLAIM_CLEAN)
1448 goto keep_locked;
1449 if (!may_enter_fs(folio, sc->gfp_mask))
1450 goto keep_locked;
1451 if (!sc->may_writepage)
1452 goto keep_locked;
1453
1454 /*
1455 * Folio is dirty. Flush the TLB if a writable entry
1456 * potentially exists to avoid CPU writes after I/O
1457 * starts and then write it out here.
1458 */
1459 try_to_unmap_flush_dirty();
1460 switch (pageout(folio, mapping, &plug, folio_list)) {
1461 case PAGE_KEEP:
1462 goto keep_locked;
1463 case PAGE_ACTIVATE:
1464 /*
1465 * If shmem folio is split when writeback to swap,
1466 * the tail pages will make their own pass through
1467 * this function and be accounted then.
1468 */
1469 if (nr_pages > 1 && !folio_test_large(folio)) {
1470 sc->nr_scanned -= (nr_pages - 1);
1471 nr_pages = 1;
1472 }
1473 goto activate_locked;
1474 case PAGE_SUCCESS:
1475 if (nr_pages > 1 && !folio_test_large(folio)) {
1476 sc->nr_scanned -= (nr_pages - 1);
1477 nr_pages = 1;
1478 }
1479 stat->nr_pageout += nr_pages;
1480
1481 if (folio_test_writeback(folio))
1482 goto keep;
1483 if (folio_test_dirty(folio))
1484 goto keep;
1485
1486 /*
1487 * A synchronous write - probably a ramdisk. Go
1488 * ahead and try to reclaim the folio.
1489 */
1490 if (!folio_trylock(folio))
1491 goto keep;
1492 if (folio_test_dirty(folio) ||
1493 folio_test_writeback(folio))
1494 goto keep_locked;
1495 mapping = folio_mapping(folio);
1496 fallthrough;
1497 case PAGE_CLEAN:
1498 ; /* try to free the folio below */
1499 }
1500 }
1501
1502 /*
1503 * If the folio has buffers, try to free the buffer
1504 * mappings associated with this folio. If we succeed
1505 * we try to free the folio as well.
1506 *
1507 * We do this even if the folio is dirty.
1508 * filemap_release_folio() does not perform I/O, but it
1509 * is possible for a folio to have the dirty flag set,
1510 * but it is actually clean (all its buffers are clean).
1511 * This happens if the buffers were written out directly,
1512 * with submit_bh(). ext3 will do this, as well as
1513 * the blockdev mapping. filemap_release_folio() will
1514 * discover that cleanness and will drop the buffers
1515 * and mark the folio clean - it can be freed.
1516 *
1517 * Rarely, folios can have buffers and no ->mapping.
1518 * These are the folios which were not successfully
1519 * invalidated in truncate_cleanup_folio(). We try to
1520 * drop those buffers here and if that worked, and the
1521 * folio is no longer mapped into process address space
1522 * (refcount == 1) it can be freed. Otherwise, leave
1523 * the folio on the LRU so it is swappable.
1524 */
1525 if (folio_needs_release(folio)) {
1526 if (!filemap_release_folio(folio, sc->gfp_mask))
1527 goto activate_locked;
1528 if (!mapping && folio_ref_count(folio) == 1) {
1529 folio_unlock(folio);
1530 if (folio_put_testzero(folio))
1531 goto free_it;
1532 else {
1533 /*
1534 * rare race with speculative reference.
1535 * the speculative reference will free
1536 * this folio shortly, so we may
1537 * increment nr_reclaimed here (and
1538 * leave it off the LRU).
1539 */
1540 nr_reclaimed += nr_pages;
1541 continue;
1542 }
1543 }
1544 }
1545
1546 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
1547 /* follow __remove_mapping for reference */
1548 if (!folio_ref_freeze(folio, 1))
1549 goto keep_locked;
1550 /*
1551 * The folio has only one reference left, which is
1552 * from the isolation. After the caller puts the
1553 * folio back on the lru and drops the reference, the
1554 * folio will be freed anyway. It doesn't matter
1555 * which lru it goes on. So we don't bother checking
1556 * the dirty flag here.
1557 */
1558 count_vm_events(PGLAZYFREED, nr_pages);
1559 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
1560 } else if (!mapping || !__remove_mapping(mapping, folio, true,
1561 sc->target_mem_cgroup))
1562 goto keep_locked;
1563
1564 folio_unlock(folio);
1565 free_it:
1566 /*
1567 * Folio may get swapped out as a whole, need to account
1568 * all pages in it.
1569 */
1570 nr_reclaimed += nr_pages;
1571
1572 folio_unqueue_deferred_split(folio);
1573 if (folio_batch_add(&free_folios, folio) == 0) {
1574 mem_cgroup_uncharge_folios(&free_folios);
1575 try_to_unmap_flush();
1576 free_unref_folios(&free_folios);
1577 }
1578 continue;
1579
1580 activate_locked_split:
1581 /*
1582 * The tail pages that failed to be added to the swap cache
1583 * reach here. Fix up nr_scanned and nr_pages.
1584 */
1585 if (nr_pages > 1) {
1586 sc->nr_scanned -= (nr_pages - 1);
1587 nr_pages = 1;
1588 }
1589 activate_locked:
1590 /* Not a candidate for swapping, so reclaim swap space. */
1591 if (folio_test_swapcache(folio) &&
1592 (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
1593 folio_free_swap(folio);
1594 VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
1595 if (!folio_test_mlocked(folio)) {
1596 int type = folio_is_file_lru(folio);
1597 folio_set_active(folio);
1598 stat->nr_activate[type] += nr_pages;
1599 count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
1600 }
1601 keep_locked:
1602 folio_unlock(folio);
1603 keep:
1604 list_add(&folio->lru, &ret_folios);
1605 VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
1606 folio_test_unevictable(folio), folio);
1607 }
1608 /* 'folio_list' is always empty here */
1609
1610 /* Migrate folios selected for demotion */
1611 nr_demoted = demote_folio_list(&demote_folios, pgdat);
1612 nr_reclaimed += nr_demoted;
1613 stat->nr_demoted += nr_demoted;
1614 /* Folios that could not be demoted are still in @demote_folios */
1615 if (!list_empty(&demote_folios)) {
1616 /* Folios which weren't demoted go back on @folio_list */
1617 list_splice_init(&demote_folios, folio_list);
1618
1619 /*
1620 * goto retry to reclaim the undemoted folios in folio_list if
1621 * desired.
1622 *
1623 * Reclaiming directly from top tier nodes is not often desired
1624 * due to it breaking the LRU ordering: in general memory
1625 * should be reclaimed from lower tier nodes and demoted from
1626 * top tier nodes.
1627 *
1628 * However, disabling reclaim from top tier nodes entirely
1629 * would cause ooms in edge scenarios where lower tier memory
1630 * is unreclaimable for whatever reason, eg memory being
1631 * mlocked or too hot to reclaim. We can disable reclaim
1632 * from top tier nodes in proactive reclaim though as that is
1633 * not real memory pressure.
1634 */
1635 if (!sc->proactive) {
1636 do_demote_pass = false;
1637 goto retry;
1638 }
1639 }
1640
1641 pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1642
1643 mem_cgroup_uncharge_folios(&free_folios);
1644 try_to_unmap_flush();
1645 free_unref_folios(&free_folios);
1646
1647 list_splice(&ret_folios, folio_list);
1648 count_vm_events(PGACTIVATE, pgactivate);
1649
1650 if (plug)
1651 swap_write_unplug(plug);
1652 return nr_reclaimed;
1653 }
1654
1655 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1656 struct list_head *folio_list)
1657 {
1658 struct scan_control sc = {
1659 .gfp_mask = GFP_KERNEL,
1660 .may_unmap = 1,
1661 };
1662 struct reclaim_stat stat;
1663 unsigned int nr_reclaimed;
1664 struct folio *folio, *next;
1665 LIST_HEAD(clean_folios);
1666 unsigned int noreclaim_flag;
1667
1668 list_for_each_entry_safe(folio, next, folio_list, lru) {
1669 if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
1670 !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
1671 !folio_test_unevictable(folio)) {
1672 folio_clear_active(folio);
1673 list_move(&folio->lru, &clean_folios);
1674 }
1675 }
1676
1677 /*
1678 * We should be safe here since we are only dealing with file pages and
1679 * we are not kswapd and therefore cannot write dirty file pages. But
1680 * call memalloc_noreclaim_save() anyway, just in case these conditions
1681 * change in the future.
1682 */
1683 noreclaim_flag = memalloc_noreclaim_save();
1684 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
1685 &stat, true, NULL);
1686 memalloc_noreclaim_restore(noreclaim_flag);
1687
1688 list_splice(&clean_folios, folio_list);
1689 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1690 -(long)nr_reclaimed);
1691 /*
1692	 * Since lazyfree pages are isolated from the file LRU from the beginning,
1693	 * they will rotate back to the anonymous LRU in the end if the discard
1694	 * failed, so the isolated counts would be mismatched.
1695	 * Compensate the isolated count for both LRU lists.
1696 */
1697 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
1698 stat.nr_lazyfree_fail);
1699 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1700 -(long)stat.nr_lazyfree_fail);
1701 return nr_reclaimed;
1702 }
1703
1704 /*
1705 * Update LRU sizes after isolating pages. The LRU size updates must
1706 * be complete before mem_cgroup_update_lru_size due to a sanity check.
1707 */
1708 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1709 enum lru_list lru, unsigned long *nr_zone_taken)
1710 {
1711 int zid;
1712
1713 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1714 if (!nr_zone_taken[zid])
1715 continue;
1716
1717 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1718 }
1719
1720 }
1721
1722 /*
1723 * Isolate folios from the lruvec to fill in the @dst list, scanning up to nr_to_scan folios.
1724 *
1725 * lruvec->lru_lock is heavily contended. Some of the functions that
1726 * shrink the lists perform better by taking out a batch of pages
1727 * and working on them outside the LRU lock.
1728 *
1729 * For pagecache intensive workloads, this function is the hottest
1730 * spot in the kernel (apart from copy_*_user functions).
1731 *
1732 * Lru_lock must be held before calling this function.
1733 *
1734 * @nr_to_scan: The number of eligible pages to look through on the list.
1735 * @lruvec: The LRU vector to pull pages from.
1736 * @dst: The temp list to put pages on to.
1737 * @nr_scanned: The number of pages that were scanned.
1738 * @sc: The scan_control struct for this reclaim session
1739 * @lru: LRU list id for isolating
1740 *
1741 * returns how many pages were moved onto *@dst.
1742 */
1743 static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
1744 struct lruvec *lruvec, struct list_head *dst,
1745 unsigned long *nr_scanned, struct scan_control *sc,
1746 enum lru_list lru)
1747 {
1748 struct list_head *src = &lruvec->lists[lru];
1749 unsigned long nr_taken = 0;
1750 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1751 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1752 unsigned long skipped = 0, total_scan = 0, scan = 0;
1753 unsigned long nr_pages;
1754 unsigned long max_nr_skipped = 0;
1755 LIST_HEAD(folios_skipped);
1756
1757 while (scan < nr_to_scan && !list_empty(src)) {
1758 struct list_head *move_to = src;
1759 struct folio *folio;
1760
1761 folio = lru_to_folio(src);
1762 prefetchw_prev_lru_folio(folio, src, flags);
1763
1764 nr_pages = folio_nr_pages(folio);
1765 total_scan += nr_pages;
1766
1767		/* Using max_nr_skipped to prevent a hard LOCKUP */
1768 if (max_nr_skipped < SWAP_CLUSTER_MAX_SKIPPED &&
1769 (folio_zonenum(folio) > sc->reclaim_idx)) {
1770 nr_skipped[folio_zonenum(folio)] += nr_pages;
1771 move_to = &folios_skipped;
1772 max_nr_skipped++;
1773 goto move;
1774 }
1775
1776 /*
1777 * Do not count skipped folios because that makes the function
1778 * return with no isolated folios if the LRU mostly contains
1779 * ineligible folios. This causes the VM to not reclaim any
1780 * folios, triggering a premature OOM.
1781 * Account all pages in a folio.
1782 */
1783 scan += nr_pages;
1784
1785 if (!folio_test_lru(folio))
1786 goto move;
1787 if (!sc->may_unmap && folio_mapped(folio))
1788 goto move;
1789
1790 /*
1791 * Be careful not to clear the lru flag until after we're
1792 * sure the folio is not being freed elsewhere -- the
1793 * folio release code relies on it.
1794 */
1795 if (unlikely(!folio_try_get(folio)))
1796 goto move;
1797
1798 if (!folio_test_clear_lru(folio)) {
1799 /* Another thread is already isolating this folio */
1800 folio_put(folio);
1801 goto move;
1802 }
1803
1804 nr_taken += nr_pages;
1805 nr_zone_taken[folio_zonenum(folio)] += nr_pages;
1806 move_to = dst;
1807 move:
1808 list_move(&folio->lru, move_to);
1809 }
1810
1811 /*
1812 * Splice any skipped folios to the start of the LRU list. Note that
1813 * this disrupts the LRU order when reclaiming for lower zones but
1814 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1815 * scanning would soon rescan the same folios to skip and waste lots
1816 * of cpu cycles.
1817 */
1818 if (!list_empty(&folios_skipped)) {
1819 int zid;
1820
1821 list_splice(&folios_skipped, src);
1822 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1823 if (!nr_skipped[zid])
1824 continue;
1825
1826 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1827 skipped += nr_skipped[zid];
1828 }
1829 }
1830 *nr_scanned = total_scan;
1831 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1832 total_scan, skipped, nr_taken, lru);
1833 update_lru_sizes(lruvec, lru, nr_zone_taken);
1834 return nr_taken;
1835 }
1836
1837 /**
1838 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
1839 * @folio: Folio to isolate from its LRU list.
1840 *
1841 * Isolate a @folio from an LRU list and adjust the vmstat statistic
1842 * corresponding to whatever LRU list the folio was on.
1843 *
1844 * The folio will have its LRU flag cleared. If it was found on the
1845 * active list, it will have the Active flag set. If it was found on the
1846 * unevictable list, it will have the Unevictable flag set. These flags
1847 * may need to be cleared by the caller before letting the page go.
1848 *
1849 * Context:
1850 *
1851 * (1) Must be called with an elevated refcount on the folio. This is a
1852 * fundamental difference from isolate_lru_folios() (which is called
1853 * without a stable reference).
1854 * (2) The lru_lock must not be held.
1855 * (3) Interrupts must be enabled.
1856 *
1857 * Return: true if the folio was removed from an LRU list.
1858 * false if the folio was not on an LRU list.
1859 */
1860 bool folio_isolate_lru(struct folio *folio)
1861 {
1862 bool ret = false;
1863
1864 VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);
1865
1866 if (folio_test_clear_lru(folio)) {
1867 struct lruvec *lruvec;
1868
1869 folio_get(folio);
1870 lruvec = folio_lruvec_lock_irq(folio);
1871 lruvec_del_folio(lruvec, folio);
1872 unlock_page_lruvec_irq(lruvec);
1873 ret = true;
1874 }
1875
1876 return ret;
1877 }
1878
1879 /*
1880 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1881 * then get rescheduled. When there is a massive number of tasks doing page
1882 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1883 * the LRU list will shrink and be scanned faster than necessary, leading to
1884 * unnecessary swapping, thrashing and OOM.
1885 */
1886 static bool too_many_isolated(struct pglist_data *pgdat, int file,
1887 struct scan_control *sc)
1888 {
1889 unsigned long inactive, isolated;
1890 bool too_many;
1891
1892 if (current_is_kswapd())
1893 return false;
1894
1895 if (!writeback_throttling_sane(sc))
1896 return false;
1897
1898 if (file) {
1899 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1900 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1901 } else {
1902 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1903 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1904 }
1905
1906 /*
1907	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so that
1908	 * they won't get blocked by normal direct reclaimers, which could
1909	 * otherwise form a circular deadlock.
1910 */
1911 if (gfp_has_io_fs(sc->gfp_mask))
1912 inactive >>= 3;
1913
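	/*
	 * Worked example of the check below (illustrative numbers): with
	 * 10000 inactive file pages on the node, a direct reclaimer whose
	 * gfp_mask allows both IO and FS compares the isolated count against
	 * 10000 >> 3 = 1250, while a GFP_NOIO/GFP_NOFS caller compares
	 * against the full 10000 and is therefore throttled much later.
	 */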
1914 too_many = isolated > inactive;
1915
1916 /* Wake up tasks throttled due to too_many_isolated. */
1917 if (!too_many)
1918 wake_throttle_isolated(pgdat);
1919
1920 return too_many;
1921 }
1922
1923 /*
1924 * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
1925 *
1926 * Returns the number of pages moved to the given lruvec.
1927 */
1928 static unsigned int move_folios_to_lru(struct lruvec *lruvec,
1929 struct list_head *list)
1930 {
1931 int nr_pages, nr_moved = 0;
1932 struct folio_batch free_folios;
1933
1934 folio_batch_init(&free_folios);
1935 while (!list_empty(list)) {
1936 struct folio *folio = lru_to_folio(list);
1937
1938 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
1939 list_del(&folio->lru);
1940 if (unlikely(!folio_evictable(folio))) {
1941 spin_unlock_irq(&lruvec->lru_lock);
1942 folio_putback_lru(folio);
1943 spin_lock_irq(&lruvec->lru_lock);
1944 continue;
1945 }
1946
1947 /*
1948 * The folio_set_lru needs to be kept here for list integrity.
1949 * Otherwise:
1950 * #0 move_folios_to_lru #1 release_pages
1951 * if (!folio_put_testzero())
1952 * if (folio_put_testzero())
1953 * !lru //skip lru_lock
1954 * folio_set_lru()
1955 * list_add(&folio->lru,)
1956 * list_add(&folio->lru,)
1957 */
1958 folio_set_lru(folio);
1959
1960 if (unlikely(folio_put_testzero(folio))) {
1961 __folio_clear_lru_flags(folio);
1962
1963 folio_unqueue_deferred_split(folio);
1964 if (folio_batch_add(&free_folios, folio) == 0) {
1965 spin_unlock_irq(&lruvec->lru_lock);
1966 mem_cgroup_uncharge_folios(&free_folios);
1967 free_unref_folios(&free_folios);
1968 spin_lock_irq(&lruvec->lru_lock);
1969 }
1970
1971 continue;
1972 }
1973
1974 /*
1975 * All pages were isolated from the same lruvec (and isolation
1976 * inhibits memcg migration).
1977 */
1978 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
1979 lruvec_add_folio(lruvec, folio);
1980 nr_pages = folio_nr_pages(folio);
1981 nr_moved += nr_pages;
1982 if (folio_test_active(folio))
1983 workingset_age_nonresident(lruvec, nr_pages);
1984 }
1985
1986 if (free_folios.nr) {
1987 spin_unlock_irq(&lruvec->lru_lock);
1988 mem_cgroup_uncharge_folios(&free_folios);
1989 free_unref_folios(&free_folios);
1990 spin_lock_irq(&lruvec->lru_lock);
1991 }
1992
1993 return nr_moved;
1994 }
1995
1996 /*
1997 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
1998 * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case
1999 * we should not throttle. Otherwise it is safe to do so.
2000 */
2001 static int current_may_throttle(void)
2002 {
2003 return !(current->flags & PF_LOCAL_THROTTLE);
2004 }
2005
2006 /*
2007 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
2008 * of reclaimed pages
2009 */
2010 static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
2011 struct lruvec *lruvec, struct scan_control *sc,
2012 enum lru_list lru)
2013 {
2014 LIST_HEAD(folio_list);
2015 unsigned long nr_scanned;
2016 unsigned int nr_reclaimed = 0;
2017 unsigned long nr_taken;
2018 struct reclaim_stat stat;
2019 bool file = is_file_lru(lru);
2020 enum vm_event_item item;
2021 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2022 bool stalled = false;
2023
2024 while (unlikely(too_many_isolated(pgdat, file, sc))) {
2025 if (stalled)
2026 return 0;
2027
2028 /* wait a bit for the reclaimer. */
2029 stalled = true;
2030 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
2031
2032 /* We are about to die and free our memory. Return now. */
2033 if (fatal_signal_pending(current))
2034 return SWAP_CLUSTER_MAX;
2035 }
2036
2037 lru_add_drain();
2038
2039 spin_lock_irq(&lruvec->lru_lock);
2040
2041 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
2042 &nr_scanned, sc, lru);
2043
2044 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2045 item = PGSCAN_KSWAPD + reclaimer_offset(sc);
2046 if (!cgroup_reclaim(sc))
2047 __count_vm_events(item, nr_scanned);
2048 count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
2049 __count_vm_events(PGSCAN_ANON + file, nr_scanned);
2050
2051 spin_unlock_irq(&lruvec->lru_lock);
2052
2053 if (nr_taken == 0)
2054 return 0;
2055
2056 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false,
2057 lruvec_memcg(lruvec));
2058
2059 spin_lock_irq(&lruvec->lru_lock);
2060 move_folios_to_lru(lruvec, &folio_list);
2061
2062 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
2063 stat.nr_demoted);
2064 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2065 item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
2066 if (!cgroup_reclaim(sc))
2067 __count_vm_events(item, nr_reclaimed);
2068 count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2069 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
2070 spin_unlock_irq(&lruvec->lru_lock);
2071
2072 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
2073
2074 /*
2075 * If dirty folios are scanned that are not queued for IO, it
2076 * implies that flushers are not doing their job. This can
2077 * happen when memory pressure pushes dirty folios to the end of
2078 * the LRU before the dirty limits are breached and the dirty
2079 * data has expired. It can also happen when the proportion of
2080 * dirty folios grows not through writes but through memory
2081 * pressure reclaiming all the clean cache. And in some cases,
2082 * the flushers simply cannot keep up with the allocation
2083 * rate. Nudge the flusher threads in case they are asleep.
2084 */
2085 if (stat.nr_unqueued_dirty == nr_taken) {
2086 wakeup_flusher_threads(WB_REASON_VMSCAN);
2087 /*
2088 * For cgroupv1 dirty throttling is achieved by waking up
2089 * the kernel flusher here and later waiting on folios
2090 * which are in writeback to finish (see shrink_folio_list()).
2091 *
2092 * Flusher may not be able to issue writeback quickly
2093 * enough for cgroupv1 writeback throttling to work
2094 * on a large system.
2095 */
2096 if (!writeback_throttling_sane(sc))
2097 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
2098 }
2099
2100 sc->nr.dirty += stat.nr_dirty;
2101 sc->nr.congested += stat.nr_congested;
2102 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2103 sc->nr.writeback += stat.nr_writeback;
2104 sc->nr.immediate += stat.nr_immediate;
2105 sc->nr.taken += nr_taken;
2106 if (file)
2107 sc->nr.file_taken += nr_taken;
2108
2109 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2110 nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2111 return nr_reclaimed;
2112 }
2113
2114 /*
2115 * shrink_active_list() moves folios from the active LRU to the inactive LRU.
2116 *
2117 * We move them the other way if the folio is referenced by one or more
2118 * processes.
2119 *
2120 * If the folios are mostly unmapped, the processing is fast and it is
2121 * appropriate to hold lru_lock across the whole operation. But if
2122 * the folios are mapped, the processing is slow (folio_referenced()), so
2123 * we should drop lru_lock around each folio. It's impossible to balance
2124 * this, so instead we remove the folios from the LRU while processing them.
2125 * It is safe to rely on the active flag against the non-LRU folios in here
2126 * because nobody will play with that bit on a non-LRU folio.
2127 *
2128 * The downside is that we have to touch folio->_refcount against each folio.
2129 * But we had to alter folio->flags anyway.
2130 */
2131 static void shrink_active_list(unsigned long nr_to_scan,
2132 struct lruvec *lruvec,
2133 struct scan_control *sc,
2134 enum lru_list lru)
2135 {
2136 unsigned long nr_taken;
2137 unsigned long nr_scanned;
2138 unsigned long vm_flags;
2139 LIST_HEAD(l_hold); /* The folios which were snipped off */
2140 LIST_HEAD(l_active);
2141 LIST_HEAD(l_inactive);
2142 unsigned nr_deactivate, nr_activate;
2143 unsigned nr_rotated = 0;
2144 bool file = is_file_lru(lru);
2145 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2146
2147 lru_add_drain();
2148
2149 spin_lock_irq(&lruvec->lru_lock);
2150
2151 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
2152 &nr_scanned, sc, lru);
2153
2154 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2155
2156 if (!cgroup_reclaim(sc))
2157 __count_vm_events(PGREFILL, nr_scanned);
2158 count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2159
2160 spin_unlock_irq(&lruvec->lru_lock);
2161
2162 while (!list_empty(&l_hold)) {
2163 struct folio *folio;
2164
2165 cond_resched();
2166 folio = lru_to_folio(&l_hold);
2167 list_del(&folio->lru);
2168
2169 if (unlikely(!folio_evictable(folio))) {
2170 folio_putback_lru(folio);
2171 continue;
2172 }
2173
2174 if (unlikely(buffer_heads_over_limit)) {
2175 if (folio_needs_release(folio) &&
2176 folio_trylock(folio)) {
2177 filemap_release_folio(folio, 0);
2178 folio_unlock(folio);
2179 }
2180 }
2181
2182 /* Referenced or rmap lock contention: rotate */
2183 if (folio_referenced(folio, 0, sc->target_mem_cgroup,
2184 &vm_flags) != 0) {
2185 /*
2186			 * Identify referenced, file-backed active folios and
2187			 * give them one more trip around the active list, so
2188			 * that executable code gets a better chance to stay in
2189			 * memory under moderate memory pressure. Anon folios
2190 * are not likely to be evicted by use-once streaming
2191 * IO, plus JVM can create lots of anon VM_EXEC folios,
2192 * so we ignore them here.
2193 */
2194 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
2195 nr_rotated += folio_nr_pages(folio);
2196 list_add(&folio->lru, &l_active);
2197 continue;
2198 }
2199 }
2200
2201 folio_clear_active(folio); /* we are de-activating */
2202 folio_set_workingset(folio);
2203 list_add(&folio->lru, &l_inactive);
2204 }
2205
2206 /*
2207 * Move folios back to the lru list.
2208 */
2209 spin_lock_irq(&lruvec->lru_lock);
2210
2211 nr_activate = move_folios_to_lru(lruvec, &l_active);
2212 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
2213
2214 __count_vm_events(PGDEACTIVATE, nr_deactivate);
2215 count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2216
2217 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2218 spin_unlock_irq(&lruvec->lru_lock);
2219
2220 if (nr_rotated)
2221 lru_note_cost(lruvec, file, 0, nr_rotated);
2222 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2223 nr_deactivate, nr_rotated, sc->priority, file);
2224 }
2225
2226 static unsigned int reclaim_folio_list(struct list_head *folio_list,
2227 struct pglist_data *pgdat)
2228 {
2229 struct reclaim_stat stat;
2230 unsigned int nr_reclaimed;
2231 struct folio *folio;
2232 struct scan_control sc = {
2233 .gfp_mask = GFP_KERNEL,
2234 .may_writepage = 1,
2235 .may_unmap = 1,
2236 .may_swap = 1,
2237 .no_demotion = 1,
2238 };
2239
2240 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &stat, true, NULL);
2241 while (!list_empty(folio_list)) {
2242 folio = lru_to_folio(folio_list);
2243 list_del(&folio->lru);
2244 folio_putback_lru(folio);
2245 }
2246 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat);
2247
2248 return nr_reclaimed;
2249 }
2250
2251 unsigned long reclaim_pages(struct list_head *folio_list)
2252 {
2253 int nid;
2254 unsigned int nr_reclaimed = 0;
2255 LIST_HEAD(node_folio_list);
2256 unsigned int noreclaim_flag;
2257
2258 if (list_empty(folio_list))
2259 return nr_reclaimed;
2260
2261 noreclaim_flag = memalloc_noreclaim_save();
2262
2263 nid = folio_nid(lru_to_folio(folio_list));
2264 do {
2265 struct folio *folio = lru_to_folio(folio_list);
2266
2267 if (nid == folio_nid(folio)) {
2268 folio_clear_active(folio);
2269 list_move(&folio->lru, &node_folio_list);
2270 continue;
2271 }
2272
2273 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
2274 nid = folio_nid(lru_to_folio(folio_list));
2275 } while (!list_empty(folio_list));
2276
2277 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
2278
2279 memalloc_noreclaim_restore(noreclaim_flag);
2280
2281 return nr_reclaimed;
2282 }
2283
2284 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2285 struct lruvec *lruvec, struct scan_control *sc)
2286 {
2287 if (is_active_lru(lru)) {
2288 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2289 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2290 else
2291 sc->skipped_deactivate = 1;
2292 return 0;
2293 }
2294
2295 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2296 }
2297
2298 /*
2299 * The inactive anon list should be small enough that the VM never has
2300 * to do too much work.
2301 *
2302 * The inactive file list should be small enough to leave most memory
2303 * to the established workingset on the scan-resistant active list,
2304 * but large enough to avoid thrashing the aggregate readahead window.
2305 *
2306 * Both inactive lists should also be large enough that each inactive
2307 * folio has a chance to be referenced again before it is reclaimed.
2308 *
2309 * If that fails and refaulting is observed, the inactive list grows.
2310 *
2311 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios
2312 * on this LRU, maintained by the pageout code. An inactive_ratio
2313 * of 3 means 3:1 or 25% of the folios are kept on the inactive list.
2314 *
2315 * total target max
2316 * memory ratio inactive
2317 * -------------------------------------
2318 * 10MB 1 5MB
2319 * 100MB 1 50MB
2320 * 1GB 3 250MB
2321 * 10GB 10 0.9GB
2322 * 100GB 31 3GB
2323 * 1TB 101 10GB
2324 * 10TB 320 32GB
2325 */
2326 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2327 {
2328 enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2329 unsigned long inactive, active;
2330 unsigned long inactive_ratio;
2331 unsigned long gb;
2332
2333 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2334 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2335
2336 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2337 if (gb)
2338 inactive_ratio = int_sqrt(10 * gb);
2339 else
2340 inactive_ratio = 1;
2341
2342 return inactive * inactive_ratio < active;
2343 }
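/*
 * A quick check of the formula against the table above (illustrative):
 * 100GB of anon or file memory gives gb = 100, so the target ratio is
 * int_sqrt(10 * 100) = int_sqrt(1000) = 31; 1TB gives gb = 1024 and
 * int_sqrt(10240) = 101, matching the table rows. inactive_is_low() then
 * reports the inactive list as too small whenever inactive * ratio < active.
 */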
2344
2345 enum scan_balance {
2346 SCAN_EQUAL,
2347 SCAN_FRACT,
2348 SCAN_ANON,
2349 SCAN_FILE,
2350 };
2351
2352 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
2353 {
2354 unsigned long file;
2355 struct lruvec *target_lruvec;
2356
2357 if (lru_gen_enabled())
2358 return;
2359
2360 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2361
2362 /*
2363	 * Flush the memory cgroup stats in a rate-limited way as we don't need
2364	 * the most accurate stats here. We may switch to regular stats flushing
2365 * in the future once it is cheap enough.
2366 */
2367 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup);
2368
2369 /*
2370 * Determine the scan balance between anon and file LRUs.
2371 */
2372 spin_lock_irq(&target_lruvec->lru_lock);
2373 sc->anon_cost = target_lruvec->anon_cost;
2374 sc->file_cost = target_lruvec->file_cost;
2375 spin_unlock_irq(&target_lruvec->lru_lock);
2376
2377 /*
2378 * Target desirable inactive:active list ratios for the anon
2379 * and file LRU lists.
2380 */
2381 if (!sc->force_deactivate) {
2382 unsigned long refaults;
2383
2384 /*
2385 * When refaults are being observed, it means a new
2386 * workingset is being established. Deactivate to get
2387 * rid of any stale active pages quickly.
2388 */
2389 refaults = lruvec_page_state(target_lruvec,
2390 WORKINGSET_ACTIVATE_ANON);
2391 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] ||
2392 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2393 sc->may_deactivate |= DEACTIVATE_ANON;
2394 else
2395 sc->may_deactivate &= ~DEACTIVATE_ANON;
2396
2397 refaults = lruvec_page_state(target_lruvec,
2398 WORKINGSET_ACTIVATE_FILE);
2399 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] ||
2400 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2401 sc->may_deactivate |= DEACTIVATE_FILE;
2402 else
2403 sc->may_deactivate &= ~DEACTIVATE_FILE;
2404 } else
2405 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2406
2407 /*
2408 * If we have plenty of inactive file pages that aren't
2409 * thrashing, try to reclaim those first before touching
2410 * anonymous pages.
2411 */
2412 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2413 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) &&
2414 !sc->no_cache_trim_mode)
2415 sc->cache_trim_mode = 1;
2416 else
2417 sc->cache_trim_mode = 0;
2418
2419 /*
2420 * Prevent the reclaimer from falling into the cache trap: as
2421 * cache pages start out inactive, every cache fault will tip
2422 * the scan balance towards the file LRU. And as the file LRU
2423 * shrinks, so does the window for rotation from references.
2424 * This means we have a runaway feedback loop where a tiny
2425 * thrashing file LRU becomes infinitely more attractive than
2426 * anon pages. Try to detect this based on file LRU size.
2427 */
2428 if (!cgroup_reclaim(sc)) {
2429 unsigned long total_high_wmark = 0;
2430 unsigned long free, anon;
2431 int z;
2432 struct zone *zone;
2433
2434 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2435 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2436 node_page_state(pgdat, NR_INACTIVE_FILE);
2437
2438 for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) {
2439 total_high_wmark += high_wmark_pages(zone);
2440 }
2441
2442 /*
2443 * Consider anon: if that's low too, this isn't a
2444 * runaway file reclaim problem, but rather just
2445 * extreme pressure. Reclaim as per usual then.
2446 */
2447 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2448
2449 sc->file_is_tiny =
2450 file + free <= total_high_wmark &&
2451 !(sc->may_deactivate & DEACTIVATE_ANON) &&
2452 anon >> sc->priority;
2453 }
2454 }
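/*
 * Illustrative numbers for the file_is_tiny check above: on a node with
 * 200MB free, 100MB on the file LRUs and 400MB worth of high watermarks,
 * file + free (300MB) <= total_high_wmark (400MB); so as long as anon
 * deactivation isn't needed and there are still enough inactive anon pages
 * for the current priority, sc->file_is_tiny is set and get_scan_count()
 * later force-scans anon (SCAN_ANON).
 */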
2455
2456 static inline void calculate_pressure_balance(struct scan_control *sc,
2457 int swappiness, u64 *fraction, u64 *denominator)
2458 {
2459 unsigned long anon_cost, file_cost, total_cost;
2460 unsigned long ap, fp;
2461
2462 /*
2463 * Calculate the pressure balance between anon and file pages.
2464 *
2465 * The amount of pressure we put on each LRU is inversely
2466 * proportional to the cost of reclaiming each list, as
2467 * determined by the share of pages that are refaulting, times
2468 * the relative IO cost of bringing back a swapped out
2469 * anonymous page vs reloading a filesystem page (swappiness).
2470 *
2471 * Although we limit that influence to ensure no list gets
2472 * left behind completely: at least a third of the pressure is
2473 * applied, before swappiness.
2474 *
2475 * With swappiness at 100, anon and file have equal IO cost.
2476 */
2477 total_cost = sc->anon_cost + sc->file_cost;
2478 anon_cost = total_cost + sc->anon_cost;
2479 file_cost = total_cost + sc->file_cost;
2480 total_cost = anon_cost + file_cost;
2481
2482 ap = swappiness * (total_cost + 1);
2483 ap /= anon_cost + 1;
2484
2485 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1);
2486 fp /= file_cost + 1;
2487
2488 fraction[WORKINGSET_ANON] = ap;
2489 fraction[WORKINGSET_FILE] = fp;
2490 *denominator = ap + fp;
2491 }
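/*
 * Sanity-checking the math above, assuming MAX_SWAPPINESS == 200 and equal
 * recent costs (sc->anon_cost == sc->file_cost == 100): anon_cost ==
 * file_cost == 300 and total_cost == 600. With swappiness 100,
 * ap = 100 * 601 / 301 = 199 and fp = 100 * 601 / 301 = 199, i.e. a 50/50
 * split, matching the "equal IO cost" remark. With the default swappiness
 * of 60, ap = 119 and fp = 279, i.e. roughly 30% of the pressure goes to
 * anon and 70% to file.
 */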
2492
2493 /*
2494 * Determine how aggressively the anon and file LRU lists should be
2495 * scanned.
2496 *
2497 * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan
2498 * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan
2499 */
2500 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2501 unsigned long *nr)
2502 {
2503 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2504 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2505 int swappiness = sc_swappiness(sc, memcg);
2506 u64 fraction[ANON_AND_FILE];
2507 u64 denominator = 0; /* gcc */
2508 enum scan_balance scan_balance;
2509 enum lru_list lru;
2510
2511 /* If we have no swap space, do not bother scanning anon folios. */
2512 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
2513 scan_balance = SCAN_FILE;
2514 goto out;
2515 }
2516
2517 /*
2518 * Global reclaim will swap to prevent OOM even with no
2519 * swappiness, but memcg users want to use this knob to
2520 * disable swapping for individual groups completely when
2521 * using the memory controller's swap limit feature would be
2522 * too expensive.
2523 */
2524 if (cgroup_reclaim(sc) && !swappiness) {
2525 scan_balance = SCAN_FILE;
2526 goto out;
2527 }
2528
2529 /* Proactive reclaim initiated by userspace for anonymous memory only */
2530 if (swappiness == SWAPPINESS_ANON_ONLY) {
2531 WARN_ON_ONCE(!sc->proactive);
2532 scan_balance = SCAN_ANON;
2533 goto out;
2534 }
2535
2536 /*
2537 * Do not apply any pressure balancing cleverness when the
2538 * system is close to OOM, scan both anon and file equally
2539 * (unless the swappiness setting disagrees with swapping).
2540 */
2541 if (!sc->priority && swappiness) {
2542 scan_balance = SCAN_EQUAL;
2543 goto out;
2544 }
2545
2546 /*
2547 * If the system is almost out of file pages, force-scan anon.
2548 */
2549 if (sc->file_is_tiny) {
2550 scan_balance = SCAN_ANON;
2551 goto out;
2552 }
2553
2554 /*
2555 * If there is enough inactive page cache, we do not reclaim
2556	 * anything from the anonymous working set right now to make sure
2557 * a streaming file access pattern doesn't cause swapping.
2558 */
2559 if (sc->cache_trim_mode) {
2560 scan_balance = SCAN_FILE;
2561 goto out;
2562 }
2563
2564 scan_balance = SCAN_FRACT;
2565 calculate_pressure_balance(sc, swappiness, fraction, &denominator);
2566
2567 out:
2568 for_each_evictable_lru(lru) {
2569 bool file = is_file_lru(lru);
2570 unsigned long lruvec_size;
2571 unsigned long low, min;
2572 unsigned long scan;
2573
2574 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2575 mem_cgroup_protection(sc->target_mem_cgroup, memcg,
2576 &min, &low);
2577
2578 if (min || low) {
2579 /*
2580 * Scale a cgroup's reclaim pressure by proportioning
2581 * its current usage to its memory.low or memory.min
2582 * setting.
2583 *
2584 * This is important, as otherwise scanning aggression
2585 * becomes extremely binary -- from nothing as we
2586 * approach the memory protection threshold, to totally
2587 * nominal as we exceed it. This results in requiring
2588 * setting extremely liberal protection thresholds. It
2589 * also means we simply get no protection at all if we
2590 * set it too low, which is not ideal.
2591 *
2592 * If there is any protection in place, we reduce scan
2593 * pressure by how much of the total memory used is
2594 * within protection thresholds.
2595 *
2596 * There is one special case: in the first reclaim pass,
2597 * we skip over all groups that are within their low
2598 * protection. If that fails to reclaim enough pages to
2599 * satisfy the reclaim goal, we come back and override
2600 * the best-effort low protection. However, we still
2601 * ideally want to honor how well-behaved groups are in
2602 * that case instead of simply punishing them all
2603 * equally. As such, we reclaim them based on how much
2604 * memory they are using, reducing the scan pressure
2605 * again by how much of the total memory used is under
2606 * hard protection.
2607 */
2608 unsigned long cgroup_size = mem_cgroup_size(memcg);
2609 unsigned long protection;
2610
2611 /* memory.low scaling, make sure we retry before OOM */
2612 if (!sc->memcg_low_reclaim && low > min) {
2613 protection = low;
2614 sc->memcg_low_skipped = 1;
2615 } else {
2616 protection = min;
2617 }
2618
2619 /* Avoid TOCTOU with earlier protection check */
2620 cgroup_size = max(cgroup_size, protection);
2621
2622 scan = lruvec_size - lruvec_size * protection /
2623 (cgroup_size + 1);
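			/*
			 * E.g. (illustrative): a cgroup using 1000 pages with
			 * memory.low protecting 800 of them and a 500-page
			 * lruvec scans 500 - 500 * 800 / 1001 = 101 pages here,
			 * i.e. pressure is scaled down to roughly the 20% of
			 * usage that is not protected.
			 */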
2624
2625 /*
2626 * Minimally target SWAP_CLUSTER_MAX pages to keep
2627 * reclaim moving forwards, avoiding decrementing
2628 * sc->priority further than desirable.
2629 */
2630 scan = max(scan, SWAP_CLUSTER_MAX);
2631 } else {
2632 scan = lruvec_size;
2633 }
2634
2635 scan >>= sc->priority;
2636
2637 /*
2638 * If the cgroup's already been deleted, make sure to
2639 * scrape out the remaining cache.
2640 */
2641 if (!scan && !mem_cgroup_online(memcg))
2642 scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2643
2644 switch (scan_balance) {
2645 case SCAN_EQUAL:
2646 /* Scan lists relative to size */
2647 break;
2648 case SCAN_FRACT:
2649 /*
2650 * Scan types proportional to swappiness and
2651 * their relative recent reclaim efficiency.
2652 * Make sure we don't miss the last page on
2653 * the offlined memory cgroups because of a
2654 * round-off error.
2655 */
2656 scan = mem_cgroup_online(memcg) ?
2657 div64_u64(scan * fraction[file], denominator) :
2658 DIV64_U64_ROUND_UP(scan * fraction[file],
2659 denominator);
2660 break;
2661 case SCAN_FILE:
2662 case SCAN_ANON:
2663 /* Scan one type exclusively */
2664 if ((scan_balance == SCAN_FILE) != file)
2665 scan = 0;
2666 break;
2667 default:
2668 /* Look ma, no brain */
2669 BUG();
2670 }
2671
2672 nr[lru] = scan;
2673 }
2674 }
2675
2676 /*
2677 * Anonymous LRU management is a waste if there is
2678 * ultimately no way to reclaim the memory.
2679 */
2680 static bool can_age_anon_pages(struct lruvec *lruvec,
2681 struct scan_control *sc)
2682 {
2683 /* Aging the anon LRU is valuable if swap is present: */
2684 if (total_swap_pages > 0)
2685 return true;
2686
2687 /* Also valuable if anon pages can be demoted: */
2688 return can_demote(lruvec_pgdat(lruvec)->node_id, sc,
2689 lruvec_memcg(lruvec));
2690 }
2691
2692 #ifdef CONFIG_LRU_GEN
2693
2694 #ifdef CONFIG_LRU_GEN_ENABLED
2695 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
2696 #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
2697 #else
2698 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
2699 #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
2700 #endif
2701
2702 static bool should_walk_mmu(void)
2703 {
2704 return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
2705 }
2706
2707 static bool should_clear_pmd_young(void)
2708 {
2709 return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
2710 }
2711
2712 /******************************************************************************
2713 * shorthand helpers
2714 ******************************************************************************/
2715
2716 #define DEFINE_MAX_SEQ(lruvec) \
2717 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2718
2719 #define DEFINE_MIN_SEQ(lruvec) \
2720 unsigned long min_seq[ANON_AND_FILE] = { \
2721 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
2722 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
2723 }
2724
2725 /* Get the min/max evictable type based on swappiness */
2726 #define min_type(swappiness) (!(swappiness))
2727 #define max_type(swappiness) ((swappiness) < SWAPPINESS_ANON_ONLY)
2728
2729 #define evictable_min_seq(min_seq, swappiness) \
2730 min((min_seq)[min_type(swappiness)], (min_seq)[max_type(swappiness)])
2731
2732 #define for_each_gen_type_zone(gen, type, zone) \
2733 for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
2734 for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
2735 for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
2736
2737 #define for_each_evictable_type(type, swappiness) \
2738 for ((type) = min_type(swappiness); (type) <= max_type(swappiness); (type)++)
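/*
 * For example: swappiness == 0 makes min_type == max_type == 1, so only the
 * file type is evictable; swappiness == SWAPPINESS_ANON_ONLY makes
 * min_type == max_type == 0, so only the anon type is evictable; any other
 * value yields the range 0..1, i.e. both types.
 */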
2739
2740 #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
2741 #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)
2742
2743 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
2744 {
2745 struct pglist_data *pgdat = NODE_DATA(nid);
2746
2747 #ifdef CONFIG_MEMCG
2748 if (memcg) {
2749 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
2750
2751 /* see the comment in mem_cgroup_lruvec() */
2752 if (!lruvec->pgdat)
2753 lruvec->pgdat = pgdat;
2754
2755 return lruvec;
2756 }
2757 #endif
2758 VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2759
2760 return &pgdat->__lruvec;
2761 }
2762
2763 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
2764 {
2765 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2766 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2767
2768 if (!sc->may_swap)
2769 return 0;
2770
2771 if (!can_demote(pgdat->node_id, sc, memcg) &&
2772 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
2773 return 0;
2774
2775 return sc_swappiness(sc, memcg);
2776 }
2777
2778 static int get_nr_gens(struct lruvec *lruvec, int type)
2779 {
2780 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
2781 }
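/*
 * E.g. with max_seq == 10 and min_seq[type] == 7, generations 7..10 are
 * still present and get_nr_gens() returns 4; seq_is_valid() below checks
 * that this count stays within [MIN_NR_GENS, MAX_NR_GENS].
 */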
2782
2783 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
2784 {
2785 int type;
2786
2787 for (type = 0; type < ANON_AND_FILE; type++) {
2788 int n = get_nr_gens(lruvec, type);
2789
2790 if (n < MIN_NR_GENS || n > MAX_NR_GENS)
2791 return false;
2792 }
2793
2794 return true;
2795 }
2796
2797 /******************************************************************************
2798 * Bloom filters
2799 ******************************************************************************/
2800
2801 /*
2802 * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when
2803 * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
2804 * bits in a bitmap, k is the number of hash functions and n is the number of
2805 * inserted items.
2806 *
2807 * Page table walkers use one of the two filters to reduce their search space.
2808 * To get rid of non-leaf entries that no longer have enough leaf entries, the
2809 * aging uses the double-buffering technique to flip to the other filter each
2810 * time it produces a new generation. For non-leaf entries that have enough
2811 * leaf entries, the aging carries them over to the next generation in
2812 * walk_pmd_range(); the eviction also reports them when walking the rmap
2813 * in lru_gen_look_around().
2814 *
2815 * For future optimizations:
2816 * 1. It's not necessary to keep both filters all the time. The spare one can be
2817 * freed after the RCU grace period and reallocated if needed again.
2818 * 2. And when reallocating, it's worth scaling its size according to the number
2819 * of inserted entries in the other filter, to reduce the memory overhead on
2820 * small systems and false positives on large systems.
2821 * 3. Jenkins' hash function is an alternative to Knuth's.
2822 */
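/*
 * Plugging the numbers above into the standard Bloom filter estimate
 * p ~= (1 - e^(-k*n/m))^k: with m = 1<<15 and k = 2, n = 10,000 gives
 * (1 - e^-0.61)^2 ~= 0.21 (~1/5) and n = 20,000 gives
 * (1 - e^-1.22)^2 ~= 0.50 (~1/2), matching the rates quoted above.
 */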
2823 #define BLOOM_FILTER_SHIFT 15
2824
2825 static inline int filter_gen_from_seq(unsigned long seq)
2826 {
2827 return seq % NR_BLOOM_FILTERS;
2828 }
2829
2830 static void get_item_key(void *item, int *key)
2831 {
2832 u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
2833
2834 BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
2835
2836 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
2837 key[1] = hash >> BLOOM_FILTER_SHIFT;
2838 }
2839
2840 static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
2841 void *item)
2842 {
2843 int key[2];
2844 unsigned long *filter;
2845 int gen = filter_gen_from_seq(seq);
2846
2847 filter = READ_ONCE(mm_state->filters[gen]);
2848 if (!filter)
2849 return true;
2850
2851 get_item_key(item, key);
2852
2853 return test_bit(key[0], filter) && test_bit(key[1], filter);
2854 }
2855
2856 static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
2857 void *item)
2858 {
2859 int key[2];
2860 unsigned long *filter;
2861 int gen = filter_gen_from_seq(seq);
2862
2863 filter = READ_ONCE(mm_state->filters[gen]);
2864 if (!filter)
2865 return;
2866
2867 get_item_key(item, key);
2868
2869 if (!test_bit(key[0], filter))
2870 set_bit(key[0], filter);
2871 if (!test_bit(key[1], filter))
2872 set_bit(key[1], filter);
2873 }
2874
2875 static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq)
2876 {
2877 unsigned long *filter;
2878 int gen = filter_gen_from_seq(seq);
2879
2880 filter = mm_state->filters[gen];
2881 if (filter) {
2882 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
2883 return;
2884 }
2885
2886 filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
2887 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
2888 WRITE_ONCE(mm_state->filters[gen], filter);
2889 }
2890
2891 /******************************************************************************
2892 * mm_struct list
2893 ******************************************************************************/
2894
2895 #ifdef CONFIG_LRU_GEN_WALKS_MMU
2896
2897 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
2898 {
2899 static struct lru_gen_mm_list mm_list = {
2900 .fifo = LIST_HEAD_INIT(mm_list.fifo),
2901 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
2902 };
2903
2904 #ifdef CONFIG_MEMCG
2905 if (memcg)
2906 return &memcg->mm_list;
2907 #endif
2908 VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2909
2910 return &mm_list;
2911 }
2912
2913 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
2914 {
2915 return &lruvec->mm_state;
2916 }
2917
2918 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
2919 {
2920 int key;
2921 struct mm_struct *mm;
2922 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
2923 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
2924
2925 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
2926 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
2927
2928 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
2929 return NULL;
2930
2931 clear_bit(key, &mm->lru_gen.bitmap);
2932
2933 return mmget_not_zero(mm) ? mm : NULL;
2934 }
2935
2936 void lru_gen_add_mm(struct mm_struct *mm)
2937 {
2938 int nid;
2939 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
2940 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2941
2942 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
2943 #ifdef CONFIG_MEMCG
2944 VM_WARN_ON_ONCE(mm->lru_gen.memcg);
2945 mm->lru_gen.memcg = memcg;
2946 #endif
2947 spin_lock(&mm_list->lock);
2948
2949 for_each_node_state(nid, N_MEMORY) {
2950 struct lruvec *lruvec = get_lruvec(memcg, nid);
2951 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
2952
2953 /* the first addition since the last iteration */
2954 if (mm_state->tail == &mm_list->fifo)
2955 mm_state->tail = &mm->lru_gen.list;
2956 }
2957
2958 list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
2959
2960 spin_unlock(&mm_list->lock);
2961 }
2962
2963 void lru_gen_del_mm(struct mm_struct *mm)
2964 {
2965 int nid;
2966 struct lru_gen_mm_list *mm_list;
2967 struct mem_cgroup *memcg = NULL;
2968
2969 if (list_empty(&mm->lru_gen.list))
2970 return;
2971
2972 #ifdef CONFIG_MEMCG
2973 memcg = mm->lru_gen.memcg;
2974 #endif
2975 mm_list = get_mm_list(memcg);
2976
2977 spin_lock(&mm_list->lock);
2978
2979 for_each_node(nid) {
2980 struct lruvec *lruvec = get_lruvec(memcg, nid);
2981 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
2982
2983 /* where the current iteration continues after */
2984 if (mm_state->head == &mm->lru_gen.list)
2985 mm_state->head = mm_state->head->prev;
2986
2987 /* where the last iteration ended before */
2988 if (mm_state->tail == &mm->lru_gen.list)
2989 mm_state->tail = mm_state->tail->next;
2990 }
2991
2992 list_del_init(&mm->lru_gen.list);
2993
2994 spin_unlock(&mm_list->lock);
2995
2996 #ifdef CONFIG_MEMCG
2997 mem_cgroup_put(mm->lru_gen.memcg);
2998 mm->lru_gen.memcg = NULL;
2999 #endif
3000 }
3001
3002 #ifdef CONFIG_MEMCG
3003 void lru_gen_migrate_mm(struct mm_struct *mm)
3004 {
3005 struct mem_cgroup *memcg;
3006 struct task_struct *task = rcu_dereference_protected(mm->owner, true);
3007
3008 VM_WARN_ON_ONCE(task->mm != mm);
3009 lockdep_assert_held(&task->alloc_lock);
3010
3011 /* for mm_update_next_owner() */
3012 if (mem_cgroup_disabled())
3013 return;
3014
3015 /* migration can happen before addition */
3016 if (!mm->lru_gen.memcg)
3017 return;
3018
3019 rcu_read_lock();
3020 memcg = mem_cgroup_from_task(task);
3021 rcu_read_unlock();
3022 if (memcg == mm->lru_gen.memcg)
3023 return;
3024
3025 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
3026
3027 lru_gen_del_mm(mm);
3028 lru_gen_add_mm(mm);
3029 }
3030 #endif
3031
3032 #else /* !CONFIG_LRU_GEN_WALKS_MMU */
3033
3034 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
3035 {
3036 return NULL;
3037 }
3038
3039 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
3040 {
3041 return NULL;
3042 }
3043
3044 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
3045 {
3046 return NULL;
3047 }
3048
3049 #endif
3050
3051 static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
3052 {
3053 int i;
3054 int hist;
3055 struct lruvec *lruvec = walk->lruvec;
3056 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
3057
3058 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
3059
3060 hist = lru_hist_from_seq(walk->seq);
3061
3062 for (i = 0; i < NR_MM_STATS; i++) {
3063 WRITE_ONCE(mm_state->stats[hist][i],
3064 mm_state->stats[hist][i] + walk->mm_stats[i]);
3065 walk->mm_stats[i] = 0;
3066 }
3067
3068 if (NR_HIST_GENS > 1 && last) {
3069 hist = lru_hist_from_seq(walk->seq + 1);
3070
3071 for (i = 0; i < NR_MM_STATS; i++)
3072 WRITE_ONCE(mm_state->stats[hist][i], 0);
3073 }
3074 }
3075
3076 static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
3077 {
3078 bool first = false;
3079 bool last = false;
3080 struct mm_struct *mm = NULL;
3081 struct lruvec *lruvec = walk->lruvec;
3082 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3083 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3084 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
3085
3086 /*
3087 * mm_state->seq is incremented after each iteration of mm_list. There
3088 * are three interesting cases for this page table walker:
3089 * 1. It tries to start a new iteration with a stale max_seq: there is
3090 * nothing left to do.
3091 * 2. It started the next iteration: it needs to reset the Bloom filter
3092 * so that a fresh set of PTE tables can be recorded.
3093 * 3. It ended the current iteration: it needs to reset the mm stats
3094 * counters and tell its caller to increment max_seq.
3095 */
3096 spin_lock(&mm_list->lock);
3097
3098 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq);
3099
3100 if (walk->seq <= mm_state->seq)
3101 goto done;
3102
3103 if (!mm_state->head)
3104 mm_state->head = &mm_list->fifo;
3105
3106 if (mm_state->head == &mm_list->fifo)
3107 first = true;
3108
3109 do {
3110 mm_state->head = mm_state->head->next;
3111 if (mm_state->head == &mm_list->fifo) {
3112 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3113 last = true;
3114 break;
3115 }
3116
3117 /* force scan for those added after the last iteration */
3118 if (!mm_state->tail || mm_state->tail == mm_state->head) {
3119 mm_state->tail = mm_state->head->next;
3120 walk->force_scan = true;
3121 }
3122 } while (!(mm = get_next_mm(walk)));
3123 done:
3124 if (*iter || last)
3125 reset_mm_stats(walk, last);
3126
3127 spin_unlock(&mm_list->lock);
3128
3129 if (mm && first)
3130 reset_bloom_filter(mm_state, walk->seq + 1);
3131
3132 if (*iter)
3133 mmput_async(*iter);
3134
3135 *iter = mm;
3136
3137 return last;
3138 }
3139
3140 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq)
3141 {
3142 bool success = false;
3143 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3144 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3145 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
3146
3147 spin_lock(&mm_list->lock);
3148
3149 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq);
3150
3151 if (seq > mm_state->seq) {
3152 mm_state->head = NULL;
3153 mm_state->tail = NULL;
3154 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3155 success = true;
3156 }
3157
3158 spin_unlock(&mm_list->lock);
3159
3160 return success;
3161 }
3162
3163 /******************************************************************************
3164 * PID controller
3165 ******************************************************************************/
3166
3167 /*
3168 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3169 *
3170 * The P term is refaulted/(evicted+protected) from a tier in the generation
3171 * currently being evicted; the I term is the exponential moving average of the
3172 * P term over the generations previously evicted, using the smoothing factor
3173 * 1/2; the D term isn't supported.
3174 *
3175 * The setpoint (SP) is always the first tier of one type; the process variable
3176 * (PV) is either any tier of the other type or any other tier of the same
3177 * type.
3178 *
3179 * The error is the difference between the SP and the PV; the correction is to
3180 * turn off protection when SP>PV or turn on protection when SP<PV.
3181 *
3182 * For future optimizations:
3183 * 1. The D term may discount the other two terms over time so that long-lived
3184 * generations can resist stale information.
3185 */
3186 struct ctrl_pos {
3187 unsigned long refaulted;
3188 unsigned long total;
3189 int gain;
3190 };
3191
3192 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3193 struct ctrl_pos *pos)
3194 {
3195 int i;
3196 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3197 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3198
3199 pos->gain = gain;
3200 pos->refaulted = pos->total = 0;
3201
3202 for (i = tier % MAX_NR_TIERS; i <= min(tier, MAX_NR_TIERS - 1); i++) {
3203 pos->refaulted += lrugen->avg_refaulted[type][i] +
3204 atomic_long_read(&lrugen->refaulted[hist][type][i]);
3205 pos->total += lrugen->avg_total[type][i] +
3206 lrugen->protected[hist][type][i] +
3207 atomic_long_read(&lrugen->evicted[hist][type][i]);
3208 }
3209 }
3210
3211 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3212 {
3213 int hist, tier;
3214 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3215 bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
3216 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
3217
3218 lockdep_assert_held(&lruvec->lru_lock);
3219
3220 if (!carryover && !clear)
3221 return;
3222
3223 hist = lru_hist_from_seq(seq);
3224
3225 for (tier = 0; tier < MAX_NR_TIERS; tier++) {
3226 if (carryover) {
3227 unsigned long sum;
3228
3229 sum = lrugen->avg_refaulted[type][tier] +
3230 atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3231 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
3232
3233 sum = lrugen->avg_total[type][tier] +
3234 lrugen->protected[hist][type][tier] +
3235 atomic_long_read(&lrugen->evicted[hist][type][tier]);
3236 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
3237 }
3238
3239 if (clear) {
3240 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
3241 atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3242 WRITE_ONCE(lrugen->protected[hist][type][tier], 0);
3243 }
3244 }
3245 }
3246
3247 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
3248 {
3249 /*
3250 * Return true if the PV has a limited number of refaults or a lower
3251 * refaulted/total than the SP.
3252 */
3253 return pv->refaulted < MIN_LRU_BATCH ||
3254 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
3255 (sp->refaulted + 1) * pv->total * pv->gain;
3256 }
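/*
 * Illustrative comparison, with equal gains and assuming MIN_LRU_BATCH is
 * much smaller than these counts: sp = { .refaulted = 100, .total = 1000 }
 * (10% refault rate) and pv = { .refaulted = 500, .total = 10000 } (5%).
 * Then 500 * (1000 + MIN_LRU_BATCH) <= 101 * 10000 holds, so the error is
 * positive (SP > PV) and protection is turned off for that tier.
 */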
3257
3258 /******************************************************************************
3259 * the aging
3260 ******************************************************************************/
3261
3262 /* promote pages accessed through page tables */
3263 static int folio_update_gen(struct folio *folio, int gen)
3264 {
3265 unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
3266
3267 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
3268
3269 /* see the comment on LRU_REFS_FLAGS */
3270 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
3271 set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
3272 return -1;
3273 }
3274
3275 do {
3276 /* lru_gen_del_folio() has isolated this page? */
3277 if (!(old_flags & LRU_GEN_MASK))
3278 return -1;
3279
3280 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
3281 new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset);
3282 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
3283
3284 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3285 }
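/*
 * Note on the encoding used above and in folio_inc_gen(): the generation
 * number is stored in folio->flags as gen + 1 so that a value of zero in
 * LRU_GEN_MASK means "not on a multi-gen LRU list"; that is why the stores
 * use (gen + 1UL) << LRU_GEN_PGOFF and the reads subtract 1.
 */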
3286
3287 /* protect pages accessed multiple times through file descriptors */
3288 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
3289 {
3290 int type = folio_is_file_lru(folio);
3291 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3292 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3293 unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
3294
3295 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
3296
3297 do {
3298 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3299 /* folio_update_gen() has promoted this page? */
3300 if (new_gen >= 0 && new_gen != old_gen)
3301 return new_gen;
3302
3303 new_gen = (old_gen + 1) % MAX_NR_GENS;
3304
3305 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
3306 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
3307 /* for folio_end_writeback() */
3308 if (reclaiming)
3309 new_flags |= BIT(PG_reclaim);
3310 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
3311
3312 lru_gen_update_size(lruvec, folio, old_gen, new_gen);
3313
3314 return new_gen;
3315 }
3316
3317 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
3318 int old_gen, int new_gen)
3319 {
3320 int type = folio_is_file_lru(folio);
3321 int zone = folio_zonenum(folio);
3322 int delta = folio_nr_pages(folio);
3323
3324 VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
3325 VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
3326
3327 walk->batched++;
3328
3329 walk->nr_pages[old_gen][type][zone] -= delta;
3330 walk->nr_pages[new_gen][type][zone] += delta;
3331 }
3332
3333 static void reset_batch_size(struct lru_gen_mm_walk *walk)
3334 {
3335 int gen, type, zone;
3336 struct lruvec *lruvec = walk->lruvec;
3337 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3338
3339 walk->batched = 0;
3340
3341 for_each_gen_type_zone(gen, type, zone) {
3342 enum lru_list lru = type * LRU_INACTIVE_FILE;
3343 int delta = walk->nr_pages[gen][type][zone];
3344
3345 if (!delta)
3346 continue;
3347
3348 walk->nr_pages[gen][type][zone] = 0;
3349 WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
3350 lrugen->nr_pages[gen][type][zone] + delta);
3351
3352 if (lru_gen_is_active(lruvec, gen))
3353 lru += LRU_ACTIVE;
3354 __update_lru_size(lruvec, lru, zone, delta);
3355 }
3356 }
3357
3358 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
3359 {
3360 struct address_space *mapping;
3361 struct vm_area_struct *vma = args->vma;
3362 struct lru_gen_mm_walk *walk = args->private;
3363
3364 if (!vma_is_accessible(vma))
3365 return true;
3366
3367 if (is_vm_hugetlb_page(vma))
3368 return true;
3369
3370 if (!vma_has_recency(vma))
3371 return true;
3372
3373 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL))
3374 return true;
3375
3376 if (vma == get_gate_vma(vma->vm_mm))
3377 return true;
3378
3379 if (vma_is_anonymous(vma))
3380 return !walk->swappiness;
3381
3382 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
3383 return true;
3384
3385 mapping = vma->vm_file->f_mapping;
3386 if (mapping_unevictable(mapping))
3387 return true;
3388
3389 if (shmem_mapping(mapping))
3390 return !walk->swappiness;
3391
3392 if (walk->swappiness > MAX_SWAPPINESS)
3393 return true;
3394
3395 /* to exclude special mappings like dax, etc. */
3396 return !mapping->a_ops->read_folio;
3397 }
3398
3399 /*
3400 * Some userspace memory allocators map many single-page VMAs. Instead of
3401 * returning to the PGD table for each such VMA, finish an entire PMD
3402 * table to reduce zigzags and improve cache performance.
3403 */
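/*
 * @mask/@size select the span of the current page table: e.g.
 * walk_pte_range() passes PMD_MASK/PAGE_SIZE so the search stays within the
 * current PMD table, and walk_pmd_range() passes PUD_MASK/PMD_SIZE.
 */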
3404 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
3405 unsigned long *vm_start, unsigned long *vm_end)
3406 {
3407 unsigned long start = round_up(*vm_end, size);
3408 unsigned long end = (start | ~mask) + 1;
3409 VMA_ITERATOR(vmi, args->mm, start);
3410
3411 VM_WARN_ON_ONCE(mask & size);
3412 VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
3413
3414 for_each_vma(vmi, args->vma) {
3415 if (end && end <= args->vma->vm_start)
3416 return false;
3417
3418 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args))
3419 continue;
3420
3421 *vm_start = max(start, args->vma->vm_start);
3422 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
3423
3424 return true;
3425 }
3426
3427 return false;
3428 }
3429
3430 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr,
3431 struct pglist_data *pgdat)
3432 {
3433 unsigned long pfn = pte_pfn(pte);
3434
3435 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3436
3437 if (!pte_present(pte) || is_zero_pfn(pfn))
3438 return -1;
3439
3440 if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
3441 return -1;
3442
3443 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm))
3444 return -1;
3445
3446 if (WARN_ON_ONCE(!pfn_valid(pfn)))
3447 return -1;
3448
3449 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3450 return -1;
3451
3452 return pfn;
3453 }
3454
3455 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr,
3456 struct pglist_data *pgdat)
3457 {
3458 unsigned long pfn = pmd_pfn(pmd);
3459
3460 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3461
3462 if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
3463 return -1;
3464
3465 if (WARN_ON_ONCE(pmd_devmap(pmd)))
3466 return -1;
3467
3468 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm))
3469 return -1;
3470
3471 if (WARN_ON_ONCE(!pfn_valid(pfn)))
3472 return -1;
3473
3474 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3475 return -1;
3476
3477 return pfn;
3478 }
3479
3480 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
3481 struct pglist_data *pgdat)
3482 {
3483 struct folio *folio = pfn_folio(pfn);
3484
3485 if (folio_lru_gen(folio) < 0)
3486 return NULL;
3487
3488 if (folio_nid(folio) != pgdat->node_id)
3489 return NULL;
3490
3491 if (folio_memcg(folio) != memcg)
3492 return NULL;
3493
3494 return folio;
3495 }
3496
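/*
 * For example, with 64-byte cache lines and 8-byte PTEs, n is 8, so a scan
 * is considered suitable when at least one in eight of the PTEs scanned was
 * young.
 */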
3497 static bool suitable_to_scan(int total, int young)
3498 {
3499 int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
3500
3501 /* suitable if the average number of young PTEs per cacheline is >=1 */
3502 return young * n >= total;
3503 }
3504
3505 static void walk_update_folio(struct lru_gen_mm_walk *walk, struct folio *folio,
3506 int new_gen, bool dirty)
3507 {
3508 int old_gen;
3509
3510 if (!folio)
3511 return;
3512
3513 if (dirty && !folio_test_dirty(folio) &&
3514 !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
3515 !folio_test_swapcache(folio)))
3516 folio_mark_dirty(folio);
3517
3518 if (walk) {
3519 old_gen = folio_update_gen(folio, new_gen);
3520 if (old_gen >= 0 && old_gen != new_gen)
3521 update_batch_size(walk, folio, old_gen, new_gen);
3522 } else if (lru_gen_set_refs(folio)) {
3523 old_gen = folio_lru_gen(folio);
3524 if (old_gen >= 0 && old_gen != new_gen)
3525 folio_activate(folio);
3526 }
3527 }
3528
3529 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
3530 struct mm_walk *args)
3531 {
3532 int i;
3533 bool dirty;
3534 pte_t *pte;
3535 spinlock_t *ptl;
3536 unsigned long addr;
3537 int total = 0;
3538 int young = 0;
3539 struct folio *last = NULL;
3540 struct lru_gen_mm_walk *walk = args->private;
3541 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3542 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3543 DEFINE_MAX_SEQ(walk->lruvec);
3544 int gen = lru_gen_from_seq(max_seq);
3545 pmd_t pmdval;
3546
3547 pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl);
3548 if (!pte)
3549 return false;
3550
3551 if (!spin_trylock(ptl)) {
3552 pte_unmap(pte);
3553 return true;
3554 }
3555
3556 if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
3557 pte_unmap_unlock(pte, ptl);
3558 return false;
3559 }
3560
3561 arch_enter_lazy_mmu_mode();
3562 restart:
3563 for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
3564 unsigned long pfn;
3565 struct folio *folio;
3566 pte_t ptent = ptep_get(pte + i);
3567
3568 total++;
3569 walk->mm_stats[MM_LEAF_TOTAL]++;
3570
3571 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat);
3572 if (pfn == -1)
3573 continue;
3574
3575 folio = get_pfn_folio(pfn, memcg, pgdat);
3576 if (!folio)
3577 continue;
3578
3579 if (!ptep_clear_young_notify(args->vma, addr, pte + i))
3580 continue;
3581
3582 if (last != folio) {
3583 walk_update_folio(walk, last, gen, dirty);
3584
3585 last = folio;
3586 dirty = false;
3587 }
3588
3589 if (pte_dirty(ptent))
3590 dirty = true;
3591
3592 young++;
3593 walk->mm_stats[MM_LEAF_YOUNG]++;
3594 }
3595
3596 walk_update_folio(walk, last, gen, dirty);
3597 last = NULL;
3598
3599 if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
3600 goto restart;
3601
3602 arch_leave_lazy_mmu_mode();
3603 pte_unmap_unlock(pte, ptl);
3604
3605 return suitable_to_scan(total, young);
3606 }
3607
3608 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
3609 struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
3610 {
3611 int i;
3612 bool dirty;
3613 pmd_t *pmd;
3614 spinlock_t *ptl;
3615 struct folio *last = NULL;
3616 struct lru_gen_mm_walk *walk = args->private;
3617 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3618 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3619 DEFINE_MAX_SEQ(walk->lruvec);
3620 int gen = lru_gen_from_seq(max_seq);
3621
3622 VM_WARN_ON_ONCE(pud_leaf(*pud));
3623
3624 /* try to batch at most 1+MIN_LRU_BATCH+1 entries */
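/*
 * The first call only records the start address and clears the bitmap;
 * later calls set a bit per PMD entry to revisit. The deferred entries are
 * processed under the PMD lock once the batch is full or when the caller
 * passes addr == -1 to flush.
 */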
3625 if (*first == -1) {
3626 *first = addr;
3627 bitmap_zero(bitmap, MIN_LRU_BATCH);
3628 return;
3629 }
3630
3631 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first);
3632 if (i && i <= MIN_LRU_BATCH) {
3633 __set_bit(i - 1, bitmap);
3634 return;
3635 }
3636
3637 pmd = pmd_offset(pud, *first);
3638
3639 ptl = pmd_lockptr(args->mm, pmd);
3640 if (!spin_trylock(ptl))
3641 goto done;
3642
3643 arch_enter_lazy_mmu_mode();
3644
3645 do {
3646 unsigned long pfn;
3647 struct folio *folio;
3648
3649 /* don't round down the first address */
3650 addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
3651
3652 if (!pmd_present(pmd[i]))
3653 goto next;
3654
3655 if (!pmd_trans_huge(pmd[i])) {
3656 if (!walk->force_scan && should_clear_pmd_young() &&
3657 !mm_has_notifiers(args->mm))
3658 pmdp_test_and_clear_young(vma, addr, pmd + i);
3659 goto next;
3660 }
3661
3662 pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat);
3663 if (pfn == -1)
3664 goto next;
3665
3666 folio = get_pfn_folio(pfn, memcg, pgdat);
3667 if (!folio)
3668 goto next;
3669
3670 if (!pmdp_clear_young_notify(vma, addr, pmd + i))
3671 goto next;
3672
3673 if (last != folio) {
3674 walk_update_folio(walk, last, gen, dirty);
3675
3676 last = folio;
3677 dirty = false;
3678 }
3679
3680 if (pmd_dirty(pmd[i]))
3681 dirty = true;
3682
3683 walk->mm_stats[MM_LEAF_YOUNG]++;
3684 next:
3685 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
3686 } while (i <= MIN_LRU_BATCH);
3687
3688 walk_update_folio(walk, last, gen, dirty);
3689
3690 arch_leave_lazy_mmu_mode();
3691 spin_unlock(ptl);
3692 done:
3693 *first = -1;
3694 }
3695
3696 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
3697 struct mm_walk *args)
3698 {
3699 int i;
3700 pmd_t *pmd;
3701 unsigned long next;
3702 unsigned long addr;
3703 struct vm_area_struct *vma;
3704 DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
3705 unsigned long first = -1;
3706 struct lru_gen_mm_walk *walk = args->private;
3707 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
3708
3709 VM_WARN_ON_ONCE(pud_leaf(*pud));
3710
3711 /*
3712 * Finish an entire PMD in two passes: the first only reaches to PTE
3713 * tables to avoid taking the PMD lock; the second, if necessary, takes
3714 * the PMD lock to clear the accessed bit in PMD entries.
3715 */
3716 pmd = pmd_offset(pud, start & PUD_MASK);
3717 restart:
3718 /* walk_pte_range() may call get_next_vma() */
3719 vma = args->vma;
3720 for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
3721 pmd_t val = pmdp_get_lockless(pmd + i);
3722
3723 next = pmd_addr_end(addr, end);
3724
3725 if (!pmd_present(val) || is_huge_zero_pmd(val)) {
3726 walk->mm_stats[MM_LEAF_TOTAL]++;
3727 continue;
3728 }
3729
3730 if (pmd_trans_huge(val)) {
3731 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3732 unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat);
3733
3734 walk->mm_stats[MM_LEAF_TOTAL]++;
3735
3736 if (pfn != -1)
3737 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
3738 continue;
3739 }
3740
3741 if (!walk->force_scan && should_clear_pmd_young() &&
3742 !mm_has_notifiers(args->mm)) {
3743 if (!pmd_young(val))
3744 continue;
3745
3746 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
3747 }
3748
3749 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i))
3750 continue;
3751
3752 walk->mm_stats[MM_NONLEAF_FOUND]++;
3753
3754 if (!walk_pte_range(&val, addr, next, args))
3755 continue;
3756
3757 walk->mm_stats[MM_NONLEAF_ADDED]++;
3758
3759 /* carry over to the next generation */
3760 update_bloom_filter(mm_state, walk->seq + 1, pmd + i);
3761 }
3762
3763 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);
3764
3765 if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
3766 goto restart;
3767 }
3768
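/*
 * Returns -EAGAIN, with walk->next_addr set, so that walk_mm() resumes the
 * walk after rescheduling or after flushing a full batch; returns 1 once
 * there is nothing left to walk in this address space.
 */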
3769 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
3770 struct mm_walk *args)
3771 {
3772 int i;
3773 pud_t *pud;
3774 unsigned long addr;
3775 unsigned long next;
3776 struct lru_gen_mm_walk *walk = args->private;
3777
3778 VM_WARN_ON_ONCE(p4d_leaf(*p4d));
3779
3780 pud = pud_offset(p4d, start & P4D_MASK);
3781 restart:
3782 for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
3783 pud_t val = READ_ONCE(pud[i]);
3784
3785 next = pud_addr_end(addr, end);
3786
3787 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
3788 continue;
3789
3790 walk_pmd_range(&val, addr, next, args);
3791
3792 if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
3793 end = (addr | ~PUD_MASK) + 1;
3794 goto done;
3795 }
3796 }
3797
3798 if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
3799 goto restart;
3800
3801 end = round_up(end, P4D_SIZE);
3802 done:
3803 if (!end || !args->vma)
3804 return 1;
3805
3806 walk->next_addr = max(end, args->vma->vm_start);
3807
3808 return -EAGAIN;
3809 }
3810
3811 static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3812 {
3813 static const struct mm_walk_ops mm_walk_ops = {
3814 .test_walk = should_skip_vma,
3815 .p4d_entry = walk_pud_range,
3816 .walk_lock = PGWALK_RDLOCK,
3817 };
3818 int err;
3819 struct lruvec *lruvec = walk->lruvec;
3820
3821 walk->next_addr = FIRST_USER_ADDRESS;
3822
3823 do {
3824 DEFINE_MAX_SEQ(lruvec);
3825
3826 err = -EBUSY;
3827
3828 /* another thread might have called inc_max_seq() */
3829 if (walk->seq != max_seq)
3830 break;
3831
3832 /* the caller might be holding the lock for write */
3833 if (mmap_read_trylock(mm)) {
3834 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
3835
3836 mmap_read_unlock(mm);
3837 }
3838
3839 if (walk->batched) {
3840 spin_lock_irq(&lruvec->lru_lock);
3841 reset_batch_size(walk);
3842 spin_unlock_irq(&lruvec->lru_lock);
3843 }
3844
3845 cond_resched();
3846 } while (err == -EAGAIN);
3847 }
3848
3849 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
3850 {
3851 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3852
3853 if (pgdat && current_is_kswapd()) {
3854 VM_WARN_ON_ONCE(walk);
3855
3856 walk = &pgdat->mm_walk;
3857 } else if (!walk && force_alloc) {
3858 VM_WARN_ON_ONCE(current_is_kswapd());
3859
3860 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3861 }
3862
3863 current->reclaim_state->mm_walk = walk;
3864
3865 return walk;
3866 }
3867
3868 static void clear_mm_walk(void)
3869 {
3870 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3871
3872 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
3873 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
3874
3875 current->reclaim_state->mm_walk = NULL;
3876
3877 if (!current_is_kswapd())
3878 kfree(walk);
3879 }
3880
3881 static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness)
3882 {
3883 int zone;
3884 int remaining = MAX_LRU_BATCH;
3885 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3886 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3887 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3888
3889 /* For file type, skip the check if swappiness is anon only */
3890 if (type && (swappiness == SWAPPINESS_ANON_ONLY))
3891 goto done;
3892
3893 /* For anon type, skip the check if swappiness is zero (file only) */
3894 if (!type && !swappiness)
3895 goto done;
3896
3897 /* prevent cold/hot inversion if the type is evictable */
3898 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3899 struct list_head *head = &lrugen->folios[old_gen][type][zone];
3900
3901 while (!list_empty(head)) {
3902 struct folio *folio = lru_to_folio(head);
3903 int refs = folio_lru_refs(folio);
3904 bool workingset = folio_test_workingset(folio);
3905
3906 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
3907 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
3908 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
3909 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
3910
3911 new_gen = folio_inc_gen(lruvec, folio, false);
3912 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
3913
3914 /* don't count the workingset being lazily promoted */
3915 if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
3916 int tier = lru_tier_from_refs(refs, workingset);
3917 int delta = folio_nr_pages(folio);
3918
3919 WRITE_ONCE(lrugen->protected[hist][type][tier],
3920 lrugen->protected[hist][type][tier] + delta);
3921 }
3922
3923 if (!--remaining)
3924 return false;
3925 }
3926 }
3927 done:
3928 reset_ctrl_pos(lruvec, type, true);
3929 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
3930
3931 return true;
3932 }
3933
3934 static bool try_to_inc_min_seq(struct lruvec *lruvec, int swappiness)
3935 {
3936 int gen, type, zone;
3937 bool success = false;
3938 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3939 DEFINE_MIN_SEQ(lruvec);
3940
3941 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3942
3943 /* find the oldest populated generation */
3944 for_each_evictable_type(type, swappiness) {
3945 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
3946 gen = lru_gen_from_seq(min_seq[type]);
3947
3948 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3949 if (!list_empty(&lrugen->folios[gen][type][zone]))
3950 goto next;
3951 }
3952
3953 min_seq[type]++;
3954 }
3955 next:
3956 ;
3957 }
3958
3959 /* see the comment on lru_gen_folio */
3960 if (swappiness && swappiness <= MAX_SWAPPINESS) {
3961 unsigned long seq = lrugen->max_seq - MIN_NR_GENS;
3962
3963 if (min_seq[LRU_GEN_ANON] > seq && min_seq[LRU_GEN_FILE] < seq)
3964 min_seq[LRU_GEN_ANON] = seq;
3965 else if (min_seq[LRU_GEN_FILE] > seq && min_seq[LRU_GEN_ANON] < seq)
3966 min_seq[LRU_GEN_FILE] = seq;
3967 }
3968
3969 for_each_evictable_type(type, swappiness) {
3970 if (min_seq[type] <= lrugen->min_seq[type])
3971 continue;
3972
3973 reset_ctrl_pos(lruvec, type, true);
3974 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
3975 success = true;
3976 }
3977
3978 return success;
3979 }
3980
3981 static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
3982 {
3983 bool success;
3984 int prev, next;
3985 int type, zone;
3986 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3987 restart:
3988 if (seq < READ_ONCE(lrugen->max_seq))
3989 return false;
3990
3991 spin_lock_irq(&lruvec->lru_lock);
3992
3993 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3994
3995 success = seq == lrugen->max_seq;
3996 if (!success)
3997 goto unlock;
3998
3999 for (type = 0; type < ANON_AND_FILE; type++) {
4000 if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
4001 continue;
4002
4003 if (inc_min_seq(lruvec, type, swappiness))
4004 continue;
4005
4006 spin_unlock_irq(&lruvec->lru_lock);
4007 cond_resched();
4008 goto restart;
4009 }
4010
4011 /*
4012 * Update the active/inactive LRU sizes for compatibility. Both sides of
4013 * the current max_seq need to be covered, since max_seq+1 can overlap
4014 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
4015 * overlap, cold/hot inversion happens.
4016 */
4017 prev = lru_gen_from_seq(lrugen->max_seq - 1);
4018 next = lru_gen_from_seq(lrugen->max_seq + 1);
4019
4020 for (type = 0; type < ANON_AND_FILE; type++) {
4021 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4022 enum lru_list lru = type * LRU_INACTIVE_FILE;
4023 long delta = lrugen->nr_pages[prev][type][zone] -
4024 lrugen->nr_pages[next][type][zone];
4025
4026 if (!delta)
4027 continue;
4028
4029 __update_lru_size(lruvec, lru, zone, delta);
4030 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
4031 }
4032 }
4033
4034 for (type = 0; type < ANON_AND_FILE; type++)
4035 reset_ctrl_pos(lruvec, type, false);
4036
4037 WRITE_ONCE(lrugen->timestamps[next], jiffies);
4038 /* make sure preceding modifications appear */
4039 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
4040 unlock:
4041 spin_unlock_irq(&lruvec->lru_lock);
4042
4043 return success;
4044 }
4045
4046 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
4047 int swappiness, bool force_scan)
4048 {
4049 bool success;
4050 struct lru_gen_mm_walk *walk;
4051 struct mm_struct *mm = NULL;
4052 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4053 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
4054
4055 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq));
4056
4057 if (!mm_state)
4058 return inc_max_seq(lruvec, seq, swappiness);
4059
4060 /* see the comment in iterate_mm_list() */
4061 if (seq <= READ_ONCE(mm_state->seq))
4062 return false;
4063
4064 /*
4065 * If the hardware doesn't automatically set the accessed bit, fallback
4066 * to lru_gen_look_around(), which only clears the accessed bit in a
4067 * handful of PTEs. Spreading the work out over a period of time is
4068 * usually less efficient, but it avoids bursty page faults.
4069 */
4070 if (!should_walk_mmu()) {
4071 success = iterate_mm_list_nowalk(lruvec, seq);
4072 goto done;
4073 }
4074
4075 walk = set_mm_walk(NULL, true);
4076 if (!walk) {
4077 success = iterate_mm_list_nowalk(lruvec, seq);
4078 goto done;
4079 }
4080
4081 walk->lruvec = lruvec;
4082 walk->seq = seq;
4083 walk->swappiness = swappiness;
4084 walk->force_scan = force_scan;
4085
4086 do {
4087 success = iterate_mm_list(walk, &mm);
4088 if (mm)
4089 walk_mm(mm, walk);
4090 } while (mm);
4091 done:
4092 if (success) {
4093 success = inc_max_seq(lruvec, seq, swappiness);
4094 WARN_ON_ONCE(!success);
4095 }
4096
4097 return success;
4098 }
4099
4100 /******************************************************************************
4101 * working set protection
4102 ******************************************************************************/
4103
4104 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
4105 {
4106 int priority;
4107 unsigned long reclaimable;
4108
4109 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
4110 return;
4111 /*
4112 * Determine the initial priority based on
4113 * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim,
4114 * where reclaimed_to_scanned_ratio = inactive / total.
4115 */
4116 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
4117 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
4118 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
4119
4120 /* round down reclaimable and round up sc->nr_to_reclaim */
4121 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
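/*
 * For example, with about 2^15 reclaimable pages (128MB) and an
 * sc->nr_to_reclaim of 32, this gives (16 - 1) - 5 = 10, so that
 * (reclaimable >> priority) is roughly 32, i.e. nr_to_reclaim.
 */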
4122
4123 /*
4124 * The estimation is based on LRU pages only, so cap the priority to keep
4125 * the shrinkers from overshooting their objects by large margins.
4126 */
4127 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
4128 }
4129
4130 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
4131 {
4132 int gen, type, zone;
4133 unsigned long total = 0;
4134 int swappiness = get_swappiness(lruvec, sc);
4135 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4136 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4137 DEFINE_MAX_SEQ(lruvec);
4138 DEFINE_MIN_SEQ(lruvec);
4139
4140 for_each_evictable_type(type, swappiness) {
4141 unsigned long seq;
4142
4143 for (seq = min_seq[type]; seq <= max_seq; seq++) {
4144 gen = lru_gen_from_seq(seq);
4145
4146 for (zone = 0; zone < MAX_NR_ZONES; zone++)
4147 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
4148 }
4149 }
4150
4151 /* whether the size is big enough to be helpful */
4152 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
4153 }
4154
4155 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc,
4156 unsigned long min_ttl)
4157 {
4158 int gen;
4159 unsigned long birth;
4160 int swappiness = get_swappiness(lruvec, sc);
4161 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4162 DEFINE_MIN_SEQ(lruvec);
4163
4164 if (mem_cgroup_below_min(NULL, memcg))
4165 return false;
4166
4167 if (!lruvec_is_sizable(lruvec, sc))
4168 return false;
4169
4170 gen = lru_gen_from_seq(evictable_min_seq(min_seq, swappiness));
4171 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
4172
4173 return time_is_before_jiffies(birth + min_ttl);
4174 }
4175
4176 /* to protect the working set of the last N jiffies */
4177 static unsigned long lru_gen_min_ttl __read_mostly;
4178
4179 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
4180 {
4181 struct mem_cgroup *memcg;
4182 unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
4183 bool reclaimable = !min_ttl;
4184
4185 VM_WARN_ON_ONCE(!current_is_kswapd());
4186
4187 set_initial_priority(pgdat, sc);
4188
4189 memcg = mem_cgroup_iter(NULL, NULL, NULL);
4190 do {
4191 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4192
4193 mem_cgroup_calculate_protection(NULL, memcg);
4194
4195 if (!reclaimable)
4196 reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl);
4197 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4198
4199 /*
4200 * The main goal is to OOM kill if every generation from all memcgs is
4201 * younger than min_ttl. However, it is also possible that all memcgs are
4202 * either too small or below their min protection.
4203 */
4204 if (!reclaimable && mutex_trylock(&oom_lock)) {
4205 struct oom_control oc = {
4206 .gfp_mask = sc->gfp_mask,
4207 };
4208
4209 out_of_memory(&oc);
4210
4211 mutex_unlock(&oom_lock);
4212 }
4213 }
4214
4215 /******************************************************************************
4216 * rmap/PT walk feedback
4217 ******************************************************************************/
4218
4219 /*
4220 * This function exploits spatial locality when shrink_folio_list() walks the
4221 * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
4222 * the scan was done cacheline efficiently, it adds the PMD entry pointing to
4223 * the PTE table to the Bloom filter. This forms a feedback loop between the
4224 * eviction and the aging.
4225 */
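/*
 * The window scanned below is clamped to MIN_LRU_BATCH PTEs around the
 * faulting address and is further bounded by the current PTE table and VMA.
 */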
4226 bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
4227 {
4228 int i;
4229 bool dirty;
4230 unsigned long start;
4231 unsigned long end;
4232 struct lru_gen_mm_walk *walk;
4233 struct folio *last = NULL;
4234 int young = 1;
4235 pte_t *pte = pvmw->pte;
4236 unsigned long addr = pvmw->address;
4237 struct vm_area_struct *vma = pvmw->vma;
4238 struct folio *folio = pfn_folio(pvmw->pfn);
4239 struct mem_cgroup *memcg = folio_memcg(folio);
4240 struct pglist_data *pgdat = folio_pgdat(folio);
4241 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4242 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
4243 DEFINE_MAX_SEQ(lruvec);
4244 int gen = lru_gen_from_seq(max_seq);
4245
4246 lockdep_assert_held(pvmw->ptl);
4247 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
4248
4249 if (!ptep_clear_young_notify(vma, addr, pte))
4250 return false;
4251
4252 if (spin_is_contended(pvmw->ptl))
4253 return true;
4254
4255 /* exclude special VMAs containing anon pages from COW */
4256 if (vma->vm_flags & VM_SPECIAL)
4257 return true;
4258
4259 /* avoid taking the LRU lock under the PTL when possible */
4260 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
4261
4262 start = max(addr & PMD_MASK, vma->vm_start);
4263 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;
4264
4265 if (end - start == PAGE_SIZE)
4266 return true;
4267
4268 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
4269 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
4270 end = start + MIN_LRU_BATCH * PAGE_SIZE;
4271 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
4272 start = end - MIN_LRU_BATCH * PAGE_SIZE;
4273 else {
4274 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
4275 end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
4276 }
4277 }
4278
4279 arch_enter_lazy_mmu_mode();
4280
4281 pte -= (addr - start) / PAGE_SIZE;
4282
4283 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
4284 unsigned long pfn;
4285 pte_t ptent = ptep_get(pte + i);
4286
4287 pfn = get_pte_pfn(ptent, vma, addr, pgdat);
4288 if (pfn == -1)
4289 continue;
4290
4291 folio = get_pfn_folio(pfn, memcg, pgdat);
4292 if (!folio)
4293 continue;
4294
4295 if (!ptep_clear_young_notify(vma, addr, pte + i))
4296 continue;
4297
4298 if (last != folio) {
4299 walk_update_folio(walk, last, gen, dirty);
4300
4301 last = folio;
4302 dirty = false;
4303 }
4304
4305 if (pte_dirty(ptent))
4306 dirty = true;
4307
4308 young++;
4309 }
4310
4311 walk_update_folio(walk, last, gen, dirty);
4312
4313 arch_leave_lazy_mmu_mode();
4314
4315 /* feedback from rmap walkers to page table walkers */
4316 if (mm_state && suitable_to_scan(i, young))
4317 update_bloom_filter(mm_state, max_seq, pvmw->pmd);
4318
4319 return true;
4320 }
4321
4322 /******************************************************************************
4323 * memcg LRU
4324 ******************************************************************************/
4325
4326 /* see the comment on MEMCG_NR_GENS */
4327 enum {
4328 MEMCG_LRU_NOP,
4329 MEMCG_LRU_HEAD,
4330 MEMCG_LRU_TAIL,
4331 MEMCG_LRU_OLD,
4332 MEMCG_LRU_YOUNG,
4333 };
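/*
 * In terms of lru_gen_rotate_memcg() below: MEMCG_LRU_HEAD and MEMCG_LRU_TAIL
 * requeue a memcg within its current generation (at the head or tail);
 * MEMCG_LRU_OLD moves it to the generation shrink_many() is currently
 * scanning, and MEMCG_LRU_YOUNG to the next one, so that it is revisited
 * sooner or later respectively.
 */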
4334
4335 static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
4336 {
4337 int seg;
4338 int old, new;
4339 unsigned long flags;
4340 int bin = get_random_u32_below(MEMCG_NR_BINS);
4341 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4342
4343 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
4344
4345 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
4346
4347 seg = 0;
4348 new = old = lruvec->lrugen.gen;
4349
4350 /* see the comment on MEMCG_NR_GENS */
4351 if (op == MEMCG_LRU_HEAD)
4352 seg = MEMCG_LRU_HEAD;
4353 else if (op == MEMCG_LRU_TAIL)
4354 seg = MEMCG_LRU_TAIL;
4355 else if (op == MEMCG_LRU_OLD)
4356 new = get_memcg_gen(pgdat->memcg_lru.seq);
4357 else if (op == MEMCG_LRU_YOUNG)
4358 new = get_memcg_gen(pgdat->memcg_lru.seq + 1);
4359 else
4360 VM_WARN_ON_ONCE(true);
4361
4362 WRITE_ONCE(lruvec->lrugen.seg, seg);
4363 WRITE_ONCE(lruvec->lrugen.gen, new);
4364
4365 hlist_nulls_del_rcu(&lruvec->lrugen.list);
4366
4367 if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
4368 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4369 else
4370 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4371
4372 pgdat->memcg_lru.nr_memcgs[old]--;
4373 pgdat->memcg_lru.nr_memcgs[new]++;
4374
4375 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
4376 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4377
4378 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
4379 }
4380
4381 #ifdef CONFIG_MEMCG
4382
4383 void lru_gen_online_memcg(struct mem_cgroup *memcg)
4384 {
4385 int gen;
4386 int nid;
4387 int bin = get_random_u32_below(MEMCG_NR_BINS);
4388
4389 for_each_node(nid) {
4390 struct pglist_data *pgdat = NODE_DATA(nid);
4391 struct lruvec *lruvec = get_lruvec(memcg, nid);
4392
4393 spin_lock_irq(&pgdat->memcg_lru.lock);
4394
4395 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
4396
4397 gen = get_memcg_gen(pgdat->memcg_lru.seq);
4398
4399 lruvec->lrugen.gen = gen;
4400
4401 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
4402 pgdat->memcg_lru.nr_memcgs[gen]++;
4403
4404 spin_unlock_irq(&pgdat->memcg_lru.lock);
4405 }
4406 }
4407
4408 void lru_gen_offline_memcg(struct mem_cgroup *memcg)
4409 {
4410 int nid;
4411
4412 for_each_node(nid) {
4413 struct lruvec *lruvec = get_lruvec(memcg, nid);
4414
4415 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD);
4416 }
4417 }
4418
4419 void lru_gen_release_memcg(struct mem_cgroup *memcg)
4420 {
4421 int gen;
4422 int nid;
4423
4424 for_each_node(nid) {
4425 struct pglist_data *pgdat = NODE_DATA(nid);
4426 struct lruvec *lruvec = get_lruvec(memcg, nid);
4427
4428 spin_lock_irq(&pgdat->memcg_lru.lock);
4429
4430 if (hlist_nulls_unhashed(&lruvec->lrugen.list))
4431 goto unlock;
4432
4433 gen = lruvec->lrugen.gen;
4434
4435 hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
4436 pgdat->memcg_lru.nr_memcgs[gen]--;
4437
4438 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
4439 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4440 unlock:
4441 spin_unlock_irq(&pgdat->memcg_lru.lock);
4442 }
4443 }
4444
4445 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
4446 {
4447 struct lruvec *lruvec = get_lruvec(memcg, nid);
4448
4449 /* see the comment on MEMCG_NR_GENS */
4450 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD)
4451 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
4452 }
4453
4454 #endif /* CONFIG_MEMCG */
4455
4456 /******************************************************************************
4457 * the eviction
4458 ******************************************************************************/
4459
4460 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
4461 int tier_idx)
4462 {
4463 bool success;
4464 bool dirty, writeback;
4465 int gen = folio_lru_gen(folio);
4466 int type = folio_is_file_lru(folio);
4467 int zone = folio_zonenum(folio);
4468 int delta = folio_nr_pages(folio);
4469 int refs = folio_lru_refs(folio);
4470 bool workingset = folio_test_workingset(folio);
4471 int tier = lru_tier_from_refs(refs, workingset);
4472 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4473
4474 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
4475
4476 /* unevictable */
4477 if (!folio_evictable(folio)) {
4478 success = lru_gen_del_folio(lruvec, folio, true);
4479 VM_WARN_ON_ONCE_FOLIO(!success, folio);
4480 folio_set_unevictable(folio);
4481 lruvec_add_folio(lruvec, folio);
4482 __count_vm_events(UNEVICTABLE_PGCULLED, delta);
4483 return true;
4484 }
4485
4486 /* promoted */
4487 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
4488 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4489 return true;
4490 }
4491
4492 /* protected */
4493 if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) {
4494 gen = folio_inc_gen(lruvec, folio, false);
4495 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4496
4497 /* don't count the workingset being lazily promoted */
4498 if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
4499 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
4500
4501 WRITE_ONCE(lrugen->protected[hist][type][tier],
4502 lrugen->protected[hist][type][tier] + delta);
4503 }
4504 return true;
4505 }
4506
4507 /* ineligible */
4508 if (!folio_test_lru(folio) || zone > sc->reclaim_idx) {
4509 gen = folio_inc_gen(lruvec, folio, false);
4510 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
4511 return true;
4512 }
4513
4514 dirty = folio_test_dirty(folio);
4515 writeback = folio_test_writeback(folio);
4516 if (type == LRU_GEN_FILE && dirty) {
4517 sc->nr.file_taken += delta;
4518 if (!writeback)
4519 sc->nr.unqueued_dirty += delta;
4520 }
4521
4522 /* waiting for writeback */
4523 if (writeback || (type == LRU_GEN_FILE && dirty)) {
4524 gen = folio_inc_gen(lruvec, folio, true);
4525 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4526 return true;
4527 }
4528
4529 return false;
4530 }
4531
4532 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc)
4533 {
4534 bool success;
4535
4536 /* swap constrained */
4537 if (!(sc->gfp_mask & __GFP_IO) &&
4538 (folio_test_dirty(folio) ||
4539 (folio_test_anon(folio) && !folio_test_swapcache(folio))))
4540 return false;
4541
4542 /* raced with release_pages() */
4543 if (!folio_try_get(folio))
4544 return false;
4545
4546 /* raced with another isolation */
4547 if (!folio_test_clear_lru(folio)) {
4548 folio_put(folio);
4549 return false;
4550 }
4551
4552 /* see the comment on LRU_REFS_FLAGS */
4553 if (!folio_test_referenced(folio))
4554 set_mask_bits(&folio->flags, LRU_REFS_MASK, 0);
4555
4556 /* for shrink_folio_list() */
4557 folio_clear_reclaim(folio);
4558
4559 success = lru_gen_del_folio(lruvec, folio, true);
4560 VM_WARN_ON_ONCE_FOLIO(!success, folio);
4561
4562 return true;
4563 }
4564
4565 static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
4566 int type, int tier, struct list_head *list)
4567 {
4568 int i;
4569 int gen;
4570 enum vm_event_item item;
4571 int sorted = 0;
4572 int scanned = 0;
4573 int isolated = 0;
4574 int skipped = 0;
4575 int remaining = MAX_LRU_BATCH;
4576 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4577 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4578
4579 VM_WARN_ON_ONCE(!list_empty(list));
4580
4581 if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
4582 return 0;
4583
4584 gen = lru_gen_from_seq(lrugen->min_seq[type]);
4585
4586 for (i = MAX_NR_ZONES; i > 0; i--) {
4587 LIST_HEAD(moved);
4588 int skipped_zone = 0;
4589 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
4590 struct list_head *head = &lrugen->folios[gen][type][zone];
4591
4592 while (!list_empty(head)) {
4593 struct folio *folio = lru_to_folio(head);
4594 int delta = folio_nr_pages(folio);
4595
4596 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
4597 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
4598 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
4599 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
4600
4601 scanned += delta;
4602
4603 if (sort_folio(lruvec, folio, sc, tier))
4604 sorted += delta;
4605 else if (isolate_folio(lruvec, folio, sc)) {
4606 list_add(&folio->lru, list);
4607 isolated += delta;
4608 } else {
4609 list_move(&folio->lru, &moved);
4610 skipped_zone += delta;
4611 }
4612
4613 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH)
4614 break;
4615 }
4616
4617 if (skipped_zone) {
4618 list_splice(&moved, head);
4619 __count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone);
4620 skipped += skipped_zone;
4621 }
4622
4623 if (!remaining || isolated >= MIN_LRU_BATCH)
4624 break;
4625 }
4626
4627 item = PGSCAN_KSWAPD + reclaimer_offset(sc);
4628 if (!cgroup_reclaim(sc)) {
4629 __count_vm_events(item, isolated);
4630 __count_vm_events(PGREFILL, sorted);
4631 }
4632 count_memcg_events(memcg, item, isolated);
4633 count_memcg_events(memcg, PGREFILL, sorted);
4634 __count_vm_events(PGSCAN_ANON + type, isolated);
4635 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
4636 scanned, skipped, isolated,
4637 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
4638 if (type == LRU_GEN_FILE)
4639 sc->nr.file_taken += isolated;
4640 /*
4641 * There might not be eligible folios due to reclaim_idx. Check the
4642 * remaining to prevent livelock if it's not making progress.
4643 */
4644 return isolated || !remaining ? scanned : 0;
4645 }
4646
4647 static int get_tier_idx(struct lruvec *lruvec, int type)
4648 {
4649 int tier;
4650 struct ctrl_pos sp, pv;
4651
4652 /*
4653 * To leave a margin for fluctuations, use a larger gain factor (2:3).
4654 * This value is chosen because any other tier would have at least twice
4655 * as many refaults as the first tier.
4656 */
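/*
 * The returned tier is the last one whose refault rate is still comparable
 * to that of tier 0 (within the 2:3 margin); sort_folio() protects folios
 * from higher tiers.
 */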
4657 read_ctrl_pos(lruvec, type, 0, 2, &sp);
4658 for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4659 read_ctrl_pos(lruvec, type, tier, 3, &pv);
4660 if (!positive_ctrl_err(&sp, &pv))
4661 break;
4662 }
4663
4664 return tier - 1;
4665 }
4666
4667 static int get_type_to_scan(struct lruvec *lruvec, int swappiness)
4668 {
4669 struct ctrl_pos sp, pv;
4670
4671 if (swappiness <= MIN_SWAPPINESS + 1)
4672 return LRU_GEN_FILE;
4673
4674 if (swappiness >= MAX_SWAPPINESS)
4675 return LRU_GEN_ANON;
4676 /*
4677 * Compare the sum of all tiers of anon with that of file to determine
4678 * which type to scan.
4679 */
4680 read_ctrl_pos(lruvec, LRU_GEN_ANON, MAX_NR_TIERS, swappiness, &sp);
4681 read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv);
4682
4683 return positive_ctrl_err(&sp, &pv);
4684 }
4685
4686 static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4687 int *type_scanned, struct list_head *list)
4688 {
4689 int i;
4690 int type = get_type_to_scan(lruvec, swappiness);
4691
4692 for_each_evictable_type(i, swappiness) {
4693 int scanned;
4694 int tier = get_tier_idx(lruvec, type);
4695
4696 *type_scanned = type;
4697
4698 scanned = scan_folios(lruvec, sc, type, tier, list);
4699 if (scanned)
4700 return scanned;
4701
4702 type = !type;
4703 }
4704
4705 return 0;
4706 }
4707
4708 static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
4709 {
4710 int type;
4711 int scanned;
4712 int reclaimed;
4713 LIST_HEAD(list);
4714 LIST_HEAD(clean);
4715 struct folio *folio;
4716 struct folio *next;
4717 enum vm_event_item item;
4718 struct reclaim_stat stat;
4719 struct lru_gen_mm_walk *walk;
4720 bool skip_retry = false;
4721 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4722 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4723 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4724
4725 spin_lock_irq(&lruvec->lru_lock);
4726
4727 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list);
4728
4729 scanned += try_to_inc_min_seq(lruvec, swappiness);
4730
4731 if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq)
4732 scanned = 0;
4733
4734 spin_unlock_irq(&lruvec->lru_lock);
4735
4736 if (list_empty(&list))
4737 return scanned;
4738 retry:
4739 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
4740 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
4741 sc->nr_reclaimed += reclaimed;
4742 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
4743 scanned, reclaimed, &stat, sc->priority,
4744 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
4745
4746 list_for_each_entry_safe_reverse(folio, next, &list, lru) {
4747 DEFINE_MIN_SEQ(lruvec);
4748
4749 if (!folio_evictable(folio)) {
4750 list_del(&folio->lru);
4751 folio_putback_lru(folio);
4752 continue;
4753 }
4754
4755 /* retry folios that may have missed folio_rotate_reclaimable() */
4756 if (!skip_retry && !folio_test_active(folio) && !folio_mapped(folio) &&
4757 !folio_test_dirty(folio) && !folio_test_writeback(folio)) {
4758 list_move(&folio->lru, &clean);
4759 continue;
4760 }
4761
4762 /* don't add rejected folios to the oldest generation */
4763 if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type])
4764 set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active));
4765 }
4766
4767 spin_lock_irq(&lruvec->lru_lock);
4768
4769 move_folios_to_lru(lruvec, &list);
4770
4771 walk = current->reclaim_state->mm_walk;
4772 if (walk && walk->batched) {
4773 walk->lruvec = lruvec;
4774 reset_batch_size(walk);
4775 }
4776
4777 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
4778 stat.nr_demoted);
4779
4780 item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
4781 if (!cgroup_reclaim(sc))
4782 __count_vm_events(item, reclaimed);
4783 count_memcg_events(memcg, item, reclaimed);
4784 __count_vm_events(PGSTEAL_ANON + type, reclaimed);
4785
4786 spin_unlock_irq(&lruvec->lru_lock);
4787
4788 list_splice_init(&clean, &list);
4789
4790 if (!list_empty(&list)) {
4791 skip_retry = true;
4792 goto retry;
4793 }
4794
4795 return scanned;
4796 }
4797
4798 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
4799 int swappiness, unsigned long *nr_to_scan)
4800 {
4801 int gen, type, zone;
4802 unsigned long size = 0;
4803 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4804 DEFINE_MIN_SEQ(lruvec);
4805
4806 *nr_to_scan = 0;
4807 /* have to run aging, since eviction is not possible anymore */
4808 if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
4809 return true;
4810
4811 for_each_evictable_type(type, swappiness) {
4812 unsigned long seq;
4813
4814 for (seq = min_seq[type]; seq <= max_seq; seq++) {
4815 gen = lru_gen_from_seq(seq);
4816
4817 for (zone = 0; zone < MAX_NR_ZONES; zone++)
4818 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
4819 }
4820 }
4821
4822 *nr_to_scan = size;
4823 /* better to run aging even though eviction is still possible */
4824 return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
4825 }
4826
4827 /*
4828 * For future optimizations:
4829 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
4830 * reclaim.
4831 */
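/*
 * get_nr_to_scan() returns a positive scan target, 0 when there is nothing
 * worth scanning at this priority or the aging could not advance max_seq,
 * and -1 when this lruvec should be skipped, either because it is below its
 * min protection or because it was just successfully aged (see the return
 * value of try_to_shrink_lruvec()).
 */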
4832 static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
4833 {
4834 bool success;
4835 unsigned long nr_to_scan;
4836 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4837 DEFINE_MAX_SEQ(lruvec);
4838
4839 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
4840 return -1;
4841
4842 success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
4843
4844 /* try to scrape all its memory if this memcg was deleted */
4845 if (nr_to_scan && !mem_cgroup_online(memcg))
4846 return nr_to_scan;
4847
4848 /* try to get away with not aging at the default priority */
4849 if (!success || sc->priority == DEF_PRIORITY)
4850 return nr_to_scan >> sc->priority;
4851
4852 /* stop scanning this lruvec as it's low on cold folios */
4853 return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
4854 }
4855
4856 static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
4857 {
4858 int i;
4859 enum zone_watermarks mark;
4860
4861 /* don't abort memcg reclaim to ensure fairness */
4862 if (!root_reclaim(sc))
4863 return false;
4864
4865 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order)))
4866 return true;
4867
4868 /* check the order to exclude compaction-induced reclaim */
4869 if (!current_is_kswapd() || sc->order)
4870 return false;
4871
4872 mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ?
4873 WMARK_PROMO : WMARK_HIGH;
4874
4875 for (i = 0; i <= sc->reclaim_idx; i++) {
4876 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
4877 unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;
4878
4879 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
4880 return false;
4881 }
4882
4883 /* kswapd should abort if all eligible zones are safe */
4884 return true;
4885 }
4886
4887 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4888 {
4889 long nr_to_scan;
4890 unsigned long scanned = 0;
4891 int swappiness = get_swappiness(lruvec, sc);
4892
4893 while (true) {
4894 int delta;
4895
4896 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
4897 if (nr_to_scan <= 0)
4898 break;
4899
4900 delta = evict_folios(lruvec, sc, swappiness);
4901 if (!delta)
4902 break;
4903
4904 scanned += delta;
4905 if (scanned >= nr_to_scan)
4906 break;
4907
4908 if (should_abort_scan(lruvec, sc))
4909 break;
4910
4911 cond_resched();
4912 }
4913
4914 /*
4915 * If too much of the file cache in the coldest generation cannot be
4916 * evicted due to being dirty, wake up the flusher threads.
4917 */
4918 if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken)
4919 wakeup_flusher_threads(WB_REASON_VMSCAN);
4920
4921 /* whether this lruvec should be rotated */
4922 return nr_to_scan < 0;
4923 }
4924
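/*
 * shrink_one() returns a MEMCG_LRU_* op for shrink_many() to apply via
 * lru_gen_rotate_memcg(), or 0 to leave the memcg where it is on the
 * memcg LRU.
 */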
4925 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
4926 {
4927 bool success;
4928 unsigned long scanned = sc->nr_scanned;
4929 unsigned long reclaimed = sc->nr_reclaimed;
4930 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4931 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4932
4933 /* lru_gen_age_node() called mem_cgroup_calculate_protection() */
4934 if (mem_cgroup_below_min(NULL, memcg))
4935 return MEMCG_LRU_YOUNG;
4936
4937 if (mem_cgroup_below_low(NULL, memcg)) {
4938 /* see the comment on MEMCG_NR_GENS */
4939 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
4940 return MEMCG_LRU_TAIL;
4941
4942 memcg_memory_event(memcg, MEMCG_LOW);
4943 }
4944
4945 success = try_to_shrink_lruvec(lruvec, sc);
4946
4947 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
4948
4949 if (!sc->proactive)
4950 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
4951 sc->nr_reclaimed - reclaimed);
4952
4953 flush_reclaim_state(sc);
4954
4955 if (success && mem_cgroup_online(memcg))
4956 return MEMCG_LRU_YOUNG;
4957
4958 if (!success && lruvec_is_sizable(lruvec, sc))
4959 return 0;
4960
4961 /* one retry if offlined or too small */
4962 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ?
4963 MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
4964 }
4965
4966 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
4967 {
4968 int op;
4969 int gen;
4970 int bin;
4971 int first_bin;
4972 struct lruvec *lruvec;
4973 struct lru_gen_folio *lrugen;
4974 struct mem_cgroup *memcg;
4975 struct hlist_nulls_node *pos;
4976
4977 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
4978 bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
4979 restart:
4980 op = 0;
4981 memcg = NULL;
4982
4983 rcu_read_lock();
4984
4985 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
4986 if (op) {
4987 lru_gen_rotate_memcg(lruvec, op);
4988 op = 0;
4989 }
4990
4991 mem_cgroup_put(memcg);
4992 memcg = NULL;
4993
4994 if (gen != READ_ONCE(lrugen->gen))
4995 continue;
4996
4997 lruvec = container_of(lrugen, struct lruvec, lrugen);
4998 memcg = lruvec_memcg(lruvec);
4999
5000 if (!mem_cgroup_tryget(memcg)) {
5001 lru_gen_release_memcg(memcg);
5002 memcg = NULL;
5003 continue;
5004 }
5005
5006 rcu_read_unlock();
5007
5008 op = shrink_one(lruvec, sc);
5009
5010 rcu_read_lock();
5011
5012 if (should_abort_scan(lruvec, sc))
5013 break;
5014 }
5015
5016 rcu_read_unlock();
5017
5018 if (op)
5019 lru_gen_rotate_memcg(lruvec, op);
5020
5021 mem_cgroup_put(memcg);
5022
5023 if (!is_a_nulls(pos))
5024 return;
5025
5026 /* restart if raced with lru_gen_rotate_memcg() */
5027 if (gen != get_nulls_value(pos))
5028 goto restart;
5029
5030 /* try the rest of the bins of the current generation */
5031 bin = get_memcg_bin(bin + 1);
5032 if (bin != first_bin)
5033 goto restart;
5034 }
5035
5036 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5037 {
5038 struct blk_plug plug;
5039
5040 VM_WARN_ON_ONCE(root_reclaim(sc));
5041 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap);
5042
5043 lru_add_drain();
5044
5045 blk_start_plug(&plug);
5046
5047 set_mm_walk(NULL, sc->proactive);
5048
5049 if (try_to_shrink_lruvec(lruvec, sc))
5050 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
5051
5052 clear_mm_walk();
5053
5054 blk_finish_plug(&plug);
5055 }
5056
5057 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
5058 {
5059 struct blk_plug plug;
5060 unsigned long reclaimed = sc->nr_reclaimed;
5061
5062 VM_WARN_ON_ONCE(!root_reclaim(sc));
5063
5064 /*
5065 * Unmapped clean folios are already prioritized. Scanning for more of
5066 * them is likely futile and can cause high reclaim latency when there
5067 * is a large number of memcgs.
5068 */
5069 if (!sc->may_writepage || !sc->may_unmap)
5070 goto done;
5071
5072 lru_add_drain();
5073
5074 blk_start_plug(&plug);
5075
5076 set_mm_walk(pgdat, sc->proactive);
5077
5078 set_initial_priority(pgdat, sc);
5079
5080 if (current_is_kswapd())
5081 sc->nr_reclaimed = 0;
5082
5083 if (mem_cgroup_disabled())
5084 shrink_one(&pgdat->__lruvec, sc);
5085 else
5086 shrink_many(pgdat, sc);
5087
5088 if (current_is_kswapd())
5089 sc->nr_reclaimed += reclaimed;
5090
5091 clear_mm_walk();
5092
5093 blk_finish_plug(&plug);
5094 done:
5095 if (sc->nr_reclaimed > reclaimed)
5096 pgdat->kswapd_failures = 0;
5097 }
5098
5099 /******************************************************************************
5100 * state change
5101 ******************************************************************************/
5102
5103 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
5104 {
5105 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5106
5107 if (lrugen->enabled) {
5108 enum lru_list lru;
5109
5110 for_each_evictable_lru(lru) {
5111 if (!list_empty(&lruvec->lists[lru]))
5112 return false;
5113 }
5114 } else {
5115 int gen, type, zone;
5116
5117 for_each_gen_type_zone(gen, type, zone) {
5118 if (!list_empty(&lrugen->folios[gen][type][zone]))
5119 return false;
5120 }
5121 }
5122
5123 return true;
5124 }
5125
5126 static bool fill_evictable(struct lruvec *lruvec)
5127 {
5128 enum lru_list lru;
5129 int remaining = MAX_LRU_BATCH;
5130
5131 for_each_evictable_lru(lru) {
5132 int type = is_file_lru(lru);
5133 bool active = is_active_lru(lru);
5134 struct list_head *head = &lruvec->lists[lru];
5135
5136 while (!list_empty(head)) {
5137 bool success;
5138 struct folio *folio = lru_to_folio(head);
5139
5140 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5141 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio);
5142 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5143 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio);
5144
5145 lruvec_del_folio(lruvec, folio);
5146 success = lru_gen_add_folio(lruvec, folio, false);
5147 VM_WARN_ON_ONCE(!success);
5148
5149 if (!--remaining)
5150 return false;
5151 }
5152 }
5153
5154 return true;
5155 }
5156
5157 static bool drain_evictable(struct lruvec *lruvec)
5158 {
5159 int gen, type, zone;
5160 int remaining = MAX_LRU_BATCH;
5161
5162 for_each_gen_type_zone(gen, type, zone) {
5163 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
5164
5165 while (!list_empty(head)) {
5166 bool success;
5167 struct folio *folio = lru_to_folio(head);
5168
5169 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5170 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
5171 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5172 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
5173
5174 success = lru_gen_del_folio(lruvec, folio, false);
5175 VM_WARN_ON_ONCE(!success);
5176 lruvec_add_folio(lruvec, folio);
5177
5178 if (!--remaining)
5179 return false;
5180 }
5181 }
5182
5183 return true;
5184 }
5185
5186 static void lru_gen_change_state(bool enabled)
5187 {
5188 static DEFINE_MUTEX(state_mutex);
5189
5190 struct mem_cgroup *memcg;
5191
5192 cgroup_lock();
5193 cpus_read_lock();
5194 get_online_mems();
5195 mutex_lock(&state_mutex);
5196
5197 if (enabled == lru_gen_enabled())
5198 goto unlock;
5199
5200 if (enabled)
5201 static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5202 else
5203 static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5204
5205 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5206 do {
5207 int nid;
5208
5209 for_each_node(nid) {
5210 struct lruvec *lruvec = get_lruvec(memcg, nid);
5211
5212 spin_lock_irq(&lruvec->lru_lock);
5213
5214 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
5215 VM_WARN_ON_ONCE(!state_is_valid(lruvec));
5216
5217 lruvec->lrugen.enabled = enabled;
5218
5219 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
5220 spin_unlock_irq(&lruvec->lru_lock);
5221 cond_resched();
5222 spin_lock_irq(&lruvec->lru_lock);
5223 }
5224
5225 spin_unlock_irq(&lruvec->lru_lock);
5226 }
5227
5228 cond_resched();
5229 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5230 unlock:
5231 mutex_unlock(&state_mutex);
5232 put_online_mems();
5233 cpus_read_unlock();
5234 cgroup_unlock();
5235 }
5236
5237 /******************************************************************************
5238 * sysfs interface
5239 ******************************************************************************/
5240
5241 static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5242 {
5243 return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
5244 }
5245
5246 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
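/*
 * Illustrative usage (assumed example; the document above is authoritative):
 *   echo 1000 > /sys/kernel/mm/lru_gen/min_ttl_ms
 * asks the aging to protect the working set of the last 1000 ms.
 */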
5247 static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
5248 const char *buf, size_t len)
5249 {
5250 unsigned int msecs;
5251
5252 if (kstrtouint(buf, 0, &msecs))
5253 return -EINVAL;
5254
5255 WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
5256
5257 return len;
5258 }
5259
5260 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms);
5261
5262 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5263 {
5264 unsigned int caps = 0;
5265
5266 if (get_cap(LRU_GEN_CORE))
5267 caps |= BIT(LRU_GEN_CORE);
5268
5269 if (should_walk_mmu())
5270 caps |= BIT(LRU_GEN_MM_WALK);
5271
5272 if (should_clear_pmd_young())
5273 caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
5274
5275 return sysfs_emit(buf, "0x%04x\n", caps);
5276 }
5277
5278 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
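/*
 * Illustrative usage (assumed examples, based on the handling below; the
 * document referenced above is authoritative):
 *   echo y     > /sys/kernel/mm/lru_gen/enabled   - enable all capabilities
 *   echo n     > /sys/kernel/mm/lru_gen/enabled   - disable all capabilities
 *   echo 0x001 > /sys/kernel/mm/lru_gen/enabled   - keep only LRU_GEN_CORE
 * Bit 0 toggles the core via lru_gen_change_state(); the remaining bits only
 * flip their static branches.
 */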
5279 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
5280 const char *buf, size_t len)
5281 {
5282 int i;
5283 unsigned int caps;
5284
5285 if (tolower(*buf) == 'n')
5286 caps = 0;
5287 else if (tolower(*buf) == 'y')
5288 caps = -1;
5289 else if (kstrtouint(buf, 0, &caps))
5290 return -EINVAL;
5291
5292 for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
5293 bool enabled = caps & BIT(i);
5294
5295 if (i == LRU_GEN_CORE)
5296 lru_gen_change_state(enabled);
5297 else if (enabled)
5298 static_branch_enable(&lru_gen_caps[i]);
5299 else
5300 static_branch_disable(&lru_gen_caps[i]);
5301 }
5302
5303 return len;
5304 }
5305
5306 static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled);
5307
5308 static struct attribute *lru_gen_attrs[] = {
5309 &lru_gen_min_ttl_attr.attr,
5310 &lru_gen_enabled_attr.attr,
5311 NULL
5312 };
5313
5314 static const struct attribute_group lru_gen_attr_group = {
5315 .name = "lru_gen",
5316 .attrs = lru_gen_attrs,
5317 };
5318
5319 /******************************************************************************
5320 * debugfs interface
5321 ******************************************************************************/
5322
5323 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
5324 {
5325 struct mem_cgroup *memcg;
5326 loff_t nr_to_skip = *pos;
5327
5328 m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
5329 if (!m->private)
5330 return ERR_PTR(-ENOMEM);
5331
5332 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5333 do {
5334 int nid;
5335
5336 for_each_node_state(nid, N_MEMORY) {
5337 if (!nr_to_skip--)
5338 return get_lruvec(memcg, nid);
5339 }
5340 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5341
5342 return NULL;
5343 }
5344
5345 static void lru_gen_seq_stop(struct seq_file *m, void *v)
5346 {
5347 if (!IS_ERR_OR_NULL(v))
5348 mem_cgroup_iter_break(NULL, lruvec_memcg(v));
5349
5350 kvfree(m->private);
5351 m->private = NULL;
5352 }
5353
5354 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
5355 {
5356 int nid = lruvec_pgdat(v)->node_id;
5357 struct mem_cgroup *memcg = lruvec_memcg(v);
5358
5359 ++*pos;
5360
5361 nid = next_memory_node(nid);
5362 if (nid == MAX_NUMNODES) {
5363 memcg = mem_cgroup_iter(NULL, memcg, NULL);
5364 if (!memcg)
5365 return NULL;
5366
5367 nid = first_memory_node;
5368 }
5369
5370 return get_lruvec(memcg, nid);
5371 }
5372
5373 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
5374 unsigned long max_seq, unsigned long *min_seq,
5375 unsigned long seq)
5376 {
5377 int i;
5378 int type, tier;
5379 int hist = lru_hist_from_seq(seq);
5380 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5381 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
5382
5383 for (tier = 0; tier < MAX_NR_TIERS; tier++) {
5384 seq_printf(m, " %10d", tier);
5385 for (type = 0; type < ANON_AND_FILE; type++) {
5386 const char *s = "xxx";
5387 unsigned long n[3] = {};
5388
5389 if (seq == max_seq) {
5390 s = "RTx";
5391 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
5392 n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
5393 } else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
5394 s = "rep";
5395 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
5396 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
5397 n[2] = READ_ONCE(lrugen->protected[hist][type][tier]);
5398 }
5399
5400 for (i = 0; i < 3; i++)
5401 seq_printf(m, " %10lu%c", n[i], s[i]);
5402 }
5403 seq_putc(m, '\n');
5404 }
5405
5406 if (!mm_state)
5407 return;
5408
5409 seq_puts(m, " ");
5410 for (i = 0; i < NR_MM_STATS; i++) {
5411 const char *s = "xxxx";
5412 unsigned long n = 0;
5413
5414 if (seq == max_seq && NR_HIST_GENS == 1) {
5415 s = "TYFA";
5416 n = READ_ONCE(mm_state->stats[hist][i]);
5417 } else if (seq != max_seq && NR_HIST_GENS > 1) {
5418 s = "tyfa";
5419 n = READ_ONCE(mm_state->stats[hist][i]);
5420 }
5421
5422 seq_printf(m, " %10lu%c", n, s[i]);
5423 }
5424 seq_putc(m, '\n');
5425 }
5426
5427 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
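/*
 * Output sketch (derived from the seq_printf() calls below): each memcg
 * starts with "memcg <id> <path>", each node with " node <id>", followed by
 * one line per generation: sequence number, age in ms, then the anon and
 * file page counts. The full variant (debugfs lru_gen_full) appends per-tier
 * refault stats and mm walk stats via lru_gen_seq_show_full().
 */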
5428 static int lru_gen_seq_show(struct seq_file *m, void *v)
5429 {
5430 unsigned long seq;
5431 bool full = !debugfs_real_fops(m->file)->write;
5432 struct lruvec *lruvec = v;
5433 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5434 int nid = lruvec_pgdat(lruvec)->node_id;
5435 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5436 DEFINE_MAX_SEQ(lruvec);
5437 DEFINE_MIN_SEQ(lruvec);
5438
5439 if (nid == first_memory_node) {
5440 const char *path = memcg ? m->private : "";
5441
5442 #ifdef CONFIG_MEMCG
5443 if (memcg)
5444 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
5445 #endif
5446 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
5447 }
5448
5449 seq_printf(m, " node %5d\n", nid);
5450
5451 if (!full)
5452 seq = evictable_min_seq(min_seq, MAX_SWAPPINESS / 2);
5453 else if (max_seq >= MAX_NR_GENS)
5454 seq = max_seq - MAX_NR_GENS + 1;
5455 else
5456 seq = 0;
5457
5458 for (; seq <= max_seq; seq++) {
5459 int type, zone;
5460 int gen = lru_gen_from_seq(seq);
5461 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
5462
5463 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
5464
5465 for (type = 0; type < ANON_AND_FILE; type++) {
5466 unsigned long size = 0;
5467 char mark = full && seq < min_seq[type] ? 'x' : ' ';
5468
5469 for (zone = 0; zone < MAX_NR_ZONES; zone++)
5470 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
5471
5472 seq_printf(m, " %10lu%c", size, mark);
5473 }
5474
5475 seq_putc(m, '\n');
5476
5477 if (full)
5478 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
5479 }
5480
5481 return 0;
5482 }
5483
5484 static const struct seq_operations lru_gen_seq_ops = {
5485 .start = lru_gen_seq_start,
5486 .stop = lru_gen_seq_stop,
5487 .next = lru_gen_seq_next,
5488 .show = lru_gen_seq_show,
5489 };
5490
5491 static int run_aging(struct lruvec *lruvec, unsigned long seq,
5492 int swappiness, bool force_scan)
5493 {
5494 DEFINE_MAX_SEQ(lruvec);
5495
5496 if (seq > max_seq)
5497 return -EINVAL;
5498
5499 return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST;
5500 }
5501
5502 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5503 int swappiness, unsigned long nr_to_reclaim)
5504 {
5505 DEFINE_MAX_SEQ(lruvec);
5506
5507 if (seq + MIN_NR_GENS > max_seq)
5508 return -EINVAL;
5509
5510 sc->nr_reclaimed = 0;
5511
5512 while (!signal_pending(current)) {
5513 DEFINE_MIN_SEQ(lruvec);
5514
5515 if (seq < evictable_min_seq(min_seq, swappiness))
5516 return 0;
5517
5518 if (sc->nr_reclaimed >= nr_to_reclaim)
5519 return 0;
5520
5521 if (!evict_folios(lruvec, sc, swappiness))
5522 return 0;
5523
5524 cond_resched();
5525 }
5526
5527 return -EINTR;
5528 }
5529
5530 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
5531 struct scan_control *sc, int swappiness, unsigned long opt)
5532 {
5533 struct lruvec *lruvec;
5534 int err = -EINVAL;
5535 struct mem_cgroup *memcg = NULL;
5536
5537 if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
5538 return -EINVAL;
5539
5540 if (!mem_cgroup_disabled()) {
5541 rcu_read_lock();
5542
5543 memcg = mem_cgroup_from_id(memcg_id);
5544 if (!mem_cgroup_tryget(memcg))
5545 memcg = NULL;
5546
5547 rcu_read_unlock();
5548
5549 if (!memcg)
5550 return -EINVAL;
5551 }
5552
5553 if (memcg_id != mem_cgroup_id(memcg))
5554 goto done;
5555
5556 lruvec = get_lruvec(memcg, nid);
5557
5558 if (swappiness < MIN_SWAPPINESS)
5559 swappiness = get_swappiness(lruvec, sc);
5560 else if (swappiness > SWAPPINESS_ANON_ONLY)
5561 goto done;
5562
5563 switch (cmd) {
5564 case '+':
5565 err = run_aging(lruvec, seq, swappiness, opt);
5566 break;
5567 case '-':
5568 err = run_eviction(lruvec, seq, sc, swappiness, opt);
5569 break;
5570 }
5571 done:
5572 mem_cgroup_put(memcg);
5573
5574 return err;
5575 }
5576
5577 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5578 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
5579 size_t len, loff_t *pos)
5580 {
5581 void *buf;
5582 char *cur, *next;
5583 unsigned int flags;
5584 struct blk_plug plug;
5585 int err = -EINVAL;
5586 struct scan_control sc = {
5587 .may_writepage = true,
5588 .may_unmap = true,
5589 .may_swap = true,
5590 .reclaim_idx = MAX_NR_ZONES - 1,
5591 .gfp_mask = GFP_KERNEL,
5592 };
5593
5594 buf = kvmalloc(len + 1, GFP_KERNEL);
5595 if (!buf)
5596 return -ENOMEM;
5597
5598 if (copy_from_user(buf, src, len)) {
5599 kvfree(buf);
5600 return -EFAULT;
5601 }
5602
5603 set_task_reclaim_state(current, &sc.reclaim_state);
5604 flags = memalloc_noreclaim_save();
5605 blk_start_plug(&plug);
5606 if (!set_mm_walk(NULL, true)) {
5607 err = -ENOMEM;
5608 goto done;
5609 }
5610
5611 next = buf;
5612 next[len] = '\0';
5613
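/*
 * Each line has the form: <cmd> <memcg_id> <node_id> <seq> [swappiness [opt]]
 * where cmd is '+' (aging; opt is passed as force_scan) or '-' (eviction;
 * opt is passed as nr_to_reclaim), and swappiness may be "max" to target
 * anonymous memory only. An illustrative (assumed) invocation:
 *   echo '+ 0 0 7' > /sys/kernel/debug/lru_gen
 */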
5614 while ((cur = strsep(&next, ",;\n"))) {
5615 int n;
5616 int end;
5617 char cmd, swap_string[5];
5618 unsigned int memcg_id;
5619 unsigned int nid;
5620 unsigned long seq;
5621 unsigned int swappiness;
5622 unsigned long opt = -1;
5623
5624 cur = skip_spaces(cur);
5625 if (!*cur)
5626 continue;
5627
5628 n = sscanf(cur, "%c %u %u %lu %n %4s %n %lu %n", &cmd, &memcg_id, &nid,
5629 &seq, &end, swap_string, &end, &opt, &end);
5630 if (n < 4 || cur[end]) {
5631 err = -EINVAL;
5632 break;
5633 }
5634
5635 if (n == 4) {
5636 swappiness = -1;
5637 } else if (!strcmp("max", swap_string)) {
5638 /* set by userspace for anonymous memory only */
5639 swappiness = SWAPPINESS_ANON_ONLY;
5640 } else {
5641 err = kstrtouint(swap_string, 0, &swappiness);
5642 if (err)
5643 break;
5644 }
5645
5646 err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
5647 if (err)
5648 break;
5649 }
5650 done:
5651 clear_mm_walk();
5652 blk_finish_plug(&plug);
5653 memalloc_noreclaim_restore(flags);
5654 set_task_reclaim_state(current, NULL);
5655
5656 kvfree(buf);
5657
5658 return err ? : len;
5659 }
5660
5661 static int lru_gen_seq_open(struct inode *inode, struct file *file)
5662 {
5663 return seq_open(file, &lru_gen_seq_ops);
5664 }
5665
5666 static const struct file_operations lru_gen_rw_fops = {
5667 .open = lru_gen_seq_open,
5668 .read = seq_read,
5669 .write = lru_gen_seq_write,
5670 .llseek = seq_lseek,
5671 .release = seq_release,
5672 };
5673
5674 static const struct file_operations lru_gen_ro_fops = {
5675 .open = lru_gen_seq_open,
5676 .read = seq_read,
5677 .llseek = seq_lseek,
5678 .release = seq_release,
5679 };
5680
5681 /******************************************************************************
5682 * initialization
5683 ******************************************************************************/
5684
5685 void lru_gen_init_pgdat(struct pglist_data *pgdat)
5686 {
5687 int i, j;
5688
5689 spin_lock_init(&pgdat->memcg_lru.lock);
5690
5691 for (i = 0; i < MEMCG_NR_GENS; i++) {
5692 for (j = 0; j < MEMCG_NR_BINS; j++)
5693 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
5694 }
5695 }
5696
5697 void lru_gen_init_lruvec(struct lruvec *lruvec)
5698 {
5699 int i;
5700 int gen, type, zone;
5701 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5702 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
5703
5704 lrugen->max_seq = MIN_NR_GENS + 1;
5705 lrugen->enabled = lru_gen_enabled();
5706
5707 for (i = 0; i <= MIN_NR_GENS + 1; i++)
5708 lrugen->timestamps[i] = jiffies;
5709
5710 for_each_gen_type_zone(gen, type, zone)
5711 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
5712
5713 if (mm_state)
5714 mm_state->seq = MIN_NR_GENS;
5715 }
5716
5717 #ifdef CONFIG_MEMCG
5718
5719 void lru_gen_init_memcg(struct mem_cgroup *memcg)
5720 {
5721 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
5722
5723 if (!mm_list)
5724 return;
5725
5726 INIT_LIST_HEAD(&mm_list->fifo);
5727 spin_lock_init(&mm_list->lock);
5728 }
5729
5730 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
5731 {
5732 int i;
5733 int nid;
5734 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
5735
5736 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo));
5737
5738 for_each_node(nid) {
5739 struct lruvec *lruvec = get_lruvec(memcg, nid);
5740 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
5741
5742 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
5743 sizeof(lruvec->lrugen.nr_pages)));
5744
5745 lruvec->lrugen.list.next = LIST_POISON1;
5746
5747 if (!mm_state)
5748 continue;
5749
5750 for (i = 0; i < NR_BLOOM_FILTERS; i++) {
5751 bitmap_free(mm_state->filters[i]);
5752 mm_state->filters[i] = NULL;
5753 }
5754 }
5755 }
5756
5757 #endif /* CONFIG_MEMCG */
5758
5759 static int __init init_lru_gen(void)
5760 {
5761 BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
5762 BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
5763
5764 if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
5765 pr_err("lru_gen: failed to create sysfs group\n");
5766
5767 debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
5768 debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
5769
5770 return 0;
5771 }
5772 late_initcall(init_lru_gen);
5773
5774 #else /* !CONFIG_LRU_GEN */
5775
5776 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
5777 {
5778 BUILD_BUG();
5779 }
5780
5781 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5782 {
5783 BUILD_BUG();
5784 }
5785
5786 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
5787 {
5788 BUILD_BUG();
5789 }
5790
5791 #endif /* CONFIG_LRU_GEN */
5792
5793 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5794 {
5795 unsigned long nr[NR_LRU_LISTS];
5796 unsigned long targets[NR_LRU_LISTS];
5797 unsigned long nr_to_scan;
5798 enum lru_list lru;
5799 unsigned long nr_reclaimed = 0;
5800 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
5801 bool proportional_reclaim;
5802 struct blk_plug plug;
5803
5804 if (lru_gen_enabled() && !root_reclaim(sc)) {
5805 lru_gen_shrink_lruvec(lruvec, sc);
5806 return;
5807 }
5808
5809 get_scan_count(lruvec, sc, nr);
5810
5811 /* Record the original scan target for proportional adjustments later */
5812 memcpy(targets, nr, sizeof(nr));
5813
5814 /*
5815 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
5816 * event that can occur when there is little memory pressure e.g.
5817 * multiple streaming readers/writers. Hence, we do not abort scanning
5818 * when the requested number of pages are reclaimed when scanning at
5819 * DEF_PRIORITY on the assumption that the fact we are direct
5820 * reclaiming implies that kswapd is not keeping up and it is best to
5821 * do a batch of work at once. For memcg reclaim one check is made to
5822 * abort proportional reclaim if either the file or anon lru has already
5823 * dropped to zero at the first pass.
5824 */
5825 proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
5826 sc->priority == DEF_PRIORITY);
5827
5828 blk_start_plug(&plug);
5829 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
5830 nr[LRU_INACTIVE_FILE]) {
5831 unsigned long nr_anon, nr_file, percentage;
5832 unsigned long nr_scanned;
5833
5834 for_each_evictable_lru(lru) {
5835 if (nr[lru]) {
5836 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
5837 nr[lru] -= nr_to_scan;
5838
5839 nr_reclaimed += shrink_list(lru, nr_to_scan,
5840 lruvec, sc);
5841 }
5842 }
5843
5844 cond_resched();
5845
5846 if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
5847 continue;
5848
5849 /*
5850 * For kswapd and memcg, reclaim at least the number of pages
5851 * requested. Ensure that the anon and file LRUs are scanned
5852 * proportionally what was requested by get_scan_count(). We
5853 * stop reclaiming one LRU and reduce the amount scanning
5854 * proportional to the original scan target.
5855 */
5856 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
5857 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
5858
5859 /*
5860 * It's just vindictive to attack the larger once the smaller
5861 * has gone to zero. And given the way we stop scanning the
5862 * smaller below, this makes sure that we only make one nudge
5863 * towards proportionality once we've got nr_to_reclaim.
5864 */
5865 if (!nr_file || !nr_anon)
5866 break;
5867
5868 if (nr_file > nr_anon) {
5869 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
5870 targets[LRU_ACTIVE_ANON] + 1;
5871 lru = LRU_BASE;
5872 percentage = nr_anon * 100 / scan_target;
5873 } else {
5874 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
5875 targets[LRU_ACTIVE_FILE] + 1;
5876 lru = LRU_FILE;
5877 percentage = nr_file * 100 / scan_target;
5878 }
5879
5880 /* Stop scanning the smaller of the LRU */
5881 nr[lru] = 0;
5882 nr[lru + LRU_ACTIVE] = 0;
5883
5884 /*
5885 * Recalculate the other LRU scan count based on its original
5886 * scan target and the percentage scanning already complete
5887 */
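/*
 * Illustrative numbers (not from the original source): with an anon
 * target of 200 and a file target of 1000, once anon has 50 pages
 * left, percentage = 50 * 100 / 201 ~= 24, so the file LRUs are
 * capped at ~76% of their original target minus what they have
 * already scanned, keeping the file:anon scan ratio close to what
 * get_scan_count() asked for.
 */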
5888 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
5889 nr_scanned = targets[lru] - nr[lru];
5890 nr[lru] = targets[lru] * (100 - percentage) / 100;
5891 nr[lru] -= min(nr[lru], nr_scanned);
5892
5893 lru += LRU_ACTIVE;
5894 nr_scanned = targets[lru] - nr[lru];
5895 nr[lru] = targets[lru] * (100 - percentage) / 100;
5896 nr[lru] -= min(nr[lru], nr_scanned);
5897 }
5898 blk_finish_plug(&plug);
5899 sc->nr_reclaimed += nr_reclaimed;
5900
5901 /*
5902 * Even if we did not try to evict anon pages at all, we want to
5903 * rebalance the anon lru active/inactive ratio.
5904 */
5905 if (can_age_anon_pages(lruvec, sc) &&
5906 inactive_is_low(lruvec, LRU_INACTIVE_ANON))
5907 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
5908 sc, LRU_ACTIVE_ANON);
5909 }
5910
5911 /* Use reclaim/compaction for costly allocs or under memory pressure */
5912 static bool in_reclaim_compaction(struct scan_control *sc)
5913 {
5914 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
5915 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
5916 sc->priority < DEF_PRIORITY - 2))
5917 return true;
5918
5919 return false;
5920 }
5921
5922 /*
5923 * Reclaim/compaction is used for high-order allocation requests. It reclaims
5924 * order-0 pages before compacting the zone. should_continue_reclaim() returns
5925 * true if more pages should be reclaimed such that when the page allocator
5926 * calls try_to_compact_pages() that it will have enough free pages to succeed.
5927 * It will give up earlier than that if there is difficulty reclaiming pages.
5928 */
5929 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
5930 unsigned long nr_reclaimed,
5931 struct scan_control *sc)
5932 {
5933 unsigned long pages_for_compaction;
5934 unsigned long inactive_lru_pages;
5935 int z;
5936 struct zone *zone;
5937
5938 /* If not in reclaim/compaction mode, stop */
5939 if (!in_reclaim_compaction(sc))
5940 return false;
5941
5942 /*
5943 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
5944 * number of pages that were scanned. This will return to the caller
5945 * with the risk that reclaim/compaction and the resulting allocation
5946 * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
5947 * allocations through requiring that the full LRU list has been scanned
5948 * first, by assuming that zero delta of sc->nr_scanned means full LRU
5949 * scan, but that approximation was wrong, and there were corner cases
5950 * where always a non-zero amount of pages were scanned.
5951 */
5952 if (!nr_reclaimed)
5953 return false;
5954
5955 /* If compaction would go ahead or the allocation would succeed, stop */
5956 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
5957 unsigned long watermark = min_wmark_pages(zone);
5958
5959 /* Allocation can already succeed, nothing to do */
5960 if (zone_watermark_ok(zone, sc->order, watermark,
5961 sc->reclaim_idx, 0))
5962 return false;
5963
5964 if (compaction_suitable(zone, sc->order, watermark,
5965 sc->reclaim_idx))
5966 return false;
5967 }
5968
5969 /*
5970 * If we have not reclaimed enough pages for compaction and the
5971 * inactive lists are large enough, continue reclaiming
5972 */
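/*
 * Note: compact_gap() (defined in mm/internal.h) is twice the requested
 * allocation size, e.g. 1024 pages for an order-9 request, so reclaim
 * keeps going until at least that many inactive pages are available.
 */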
5973 pages_for_compaction = compact_gap(sc->order);
5974 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
5975 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
5976 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
5977
5978 return inactive_lru_pages > pages_for_compaction;
5979 }
5980
5981 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
5982 {
5983 struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
5984 struct mem_cgroup_reclaim_cookie reclaim = {
5985 .pgdat = pgdat,
5986 };
5987 struct mem_cgroup_reclaim_cookie *partial = &reclaim;
5988 struct mem_cgroup *memcg;
5989
5990 /*
5991 * In most cases, direct reclaimers can do partial walks
5992 * through the cgroup tree, using an iterator state that
5993 * persists across invocations. This strikes a balance between
5994 * fairness and allocation latency.
5995 *
5996 * For kswapd, reliable forward progress is more important
5997 * than a quick return to idle. Always do full walks.
5998 */
5999 if (current_is_kswapd() || sc->memcg_full_walk)
6000 partial = NULL;
6001
6002 memcg = mem_cgroup_iter(target_memcg, NULL, partial);
6003 do {
6004 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6005 unsigned long reclaimed;
6006 unsigned long scanned;
6007
6008 /*
6009 * This loop can become CPU-bound when target memcgs
6010 * aren't eligible for reclaim - either because they
6011 * don't have any reclaimable pages, or because their
6012 * memory is explicitly protected. Avoid soft lockups.
6013 */
6014 cond_resched();
6015
6016 mem_cgroup_calculate_protection(target_memcg, memcg);
6017
6018 if (mem_cgroup_below_min(target_memcg, memcg)) {
6019 /*
6020 * Hard protection.
6021 * If there is no reclaimable memory, OOM.
6022 */
6023 continue;
6024 } else if (mem_cgroup_below_low(target_memcg, memcg)) {
6025 /*
6026 * Soft protection.
6027 * Respect the protection only as long as
6028 * there is an unprotected supply
6029 * of reclaimable memory from other cgroups.
6030 */
6031 if (!sc->memcg_low_reclaim) {
6032 sc->memcg_low_skipped = 1;
6033 continue;
6034 }
6035 memcg_memory_event(memcg, MEMCG_LOW);
6036 }
6037
6038 reclaimed = sc->nr_reclaimed;
6039 scanned = sc->nr_scanned;
6040
6041 shrink_lruvec(lruvec, sc);
6042
6043 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
6044 sc->priority);
6045
6046 /* Record the group's reclaim efficiency */
6047 if (!sc->proactive)
6048 vmpressure(sc->gfp_mask, memcg, false,
6049 sc->nr_scanned - scanned,
6050 sc->nr_reclaimed - reclaimed);
6051
6052 /* If partial walks are allowed, bail once goal is reached */
6053 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) {
6054 mem_cgroup_iter_break(target_memcg, memcg);
6055 break;
6056 }
6057 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, partial)));
6058 }
6059
6060 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
6061 {
6062 unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed;
6063 struct lruvec *target_lruvec;
6064 bool reclaimable = false;
6065
6066 if (lru_gen_enabled() && root_reclaim(sc)) {
6067 memset(&sc->nr, 0, sizeof(sc->nr));
6068 lru_gen_shrink_node(pgdat, sc);
6069 return;
6070 }
6071
6072 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
6073
6074 again:
6075 memset(&sc->nr, 0, sizeof(sc->nr));
6076
6077 nr_reclaimed = sc->nr_reclaimed;
6078 nr_scanned = sc->nr_scanned;
6079
6080 prepare_scan_control(pgdat, sc);
6081
6082 shrink_node_memcgs(pgdat, sc);
6083
6084 flush_reclaim_state(sc);
6085
6086 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed;
6087
6088 /* Record the subtree's reclaim efficiency */
6089 if (!sc->proactive)
6090 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
6091 sc->nr_scanned - nr_scanned, nr_node_reclaimed);
6092
6093 if (nr_node_reclaimed)
6094 reclaimable = true;
6095
6096 if (current_is_kswapd()) {
6097 /*
6098 * If reclaim is isolating dirty pages under writeback,
6099 * it implies that the long-lived page allocation rate
6100 * is exceeding the page laundering rate. Either the
6101 * global limits are not being effective at throttling
6102 * processes due to the page distribution throughout
6103 * zones or there is heavy usage of a slow backing
6104 * device. The only option is to throttle from reclaim
6105 * context which is not ideal as there is no guarantee
6106 * the dirtying process is throttled in the same way
6107 * balance_dirty_pages() manages.
6108 *
6109 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
6110 * count the number of pages under writeback that are flagged for
6111 * immediate reclaim and stall if any are encountered
6112 * in the nr_immediate check below.
6113 */
6114 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
6115 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
6116
6117 /* Allow kswapd to start writing pages during reclaim. */
6118 if (sc->nr.unqueued_dirty &&
6119 sc->nr.unqueued_dirty == sc->nr.file_taken)
6120 set_bit(PGDAT_DIRTY, &pgdat->flags);
6121
6122 /*
6123 * If kswapd scans pages marked for immediate
6124 * reclaim and under writeback (nr_immediate), it
6125 * implies that pages are cycling through the LRU
6126 * faster than they are written so forcibly stall
6127 * until some pages complete writeback.
6128 */
6129 if (sc->nr.immediate)
6130 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
6131 }
6132
6133 /*
6134 * Tag a node/memcg as congested if all the dirty pages were marked
6135 * for writeback and immediate reclaim (counted in nr.congested).
6136 *
6137 * Legacy memcg will stall in page writeback so avoid forcibly
6138 * stalling in reclaim_throttle().
6139 */
6140 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) {
6141 if (cgroup_reclaim(sc) && writeback_throttling_sane(sc))
6142 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags);
6143
6144 if (current_is_kswapd())
6145 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags);
6146 }
6147
6148 /*
6149 * Stall direct reclaim for IO completions if the lruvec or
6150 * node is congested. Allow kswapd to continue until it
6151 * starts encountering unqueued dirty pages or cycling through
6152 * the LRU too quickly.
6153 */
6154 if (!current_is_kswapd() && current_may_throttle() &&
6155 !sc->hibernation_mode &&
6156 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) ||
6157 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags)))
6158 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
6159
6160 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
6161 goto again;
6162
6163 /*
6164 * Kswapd gives up on balancing particular nodes after too
6165 * many failures to reclaim anything from them and goes to
6166 * sleep. On reclaim progress, reset the failure counter. A
6167 * successful direct reclaim run will revive a dormant kswapd.
6168 */
6169 if (reclaimable)
6170 pgdat->kswapd_failures = 0;
6171 else if (sc->cache_trim_mode)
6172 sc->cache_trim_mode_failed = 1;
6173 }
6174
6175 /*
6176 * Returns true if compaction should go ahead for a costly-order request, or
6177 * the allocation would already succeed without compaction. Return false if we
6178 * should reclaim first.
6179 */
6180 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
6181 {
6182 unsigned long watermark;
6183
6184 if (!gfp_compaction_allowed(sc->gfp_mask))
6185 return false;
6186
6187 /* Allocation can already succeed, nothing to do */
6188 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
6189 sc->reclaim_idx, 0))
6190 return true;
6191
6192 /*
6193 * Direct reclaim usually targets the min watermark, but compaction
6194 * takes time to run and there are potentially other callers using the
6195 * pages just freed. So target a higher buffer to give compaction a
6196 * reasonable chance of completing and allocating the pages.
6197 *
6198 * Note that we won't actually reclaim the whole buffer in one attempt
6199 * as the target watermark in should_continue_reclaim() is lower. But if
6200 * we are already above the high+gap watermark, don't reclaim at all.
6201 */
6202 watermark = high_wmark_pages(zone);
6203 if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx))
6204 return true;
6205
6206 return false;
6207 }
6208
6209 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
6210 {
6211 /*
6212 * If reclaim is making progress at greater than 12% efficiency
6213 * (nr_reclaimed > nr_scanned / 8), wake all the NOPROGRESS throttled tasks.
6214 */
6215 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) {
6216 wait_queue_head_t *wqh;
6217
6218 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
6219 if (waitqueue_active(wqh))
6220 wake_up(wqh);
6221
6222 return;
6223 }
6224
6225 /*
6226 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
6227 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
6228 * under writeback and marked for immediate reclaim at the tail of the
6229 * LRU.
6230 */
6231 if (current_is_kswapd() || cgroup_reclaim(sc))
6232 return;
6233
6234 /* Throttle if making no progress at high priorities. */
6235 if (sc->priority == 1 && !sc->nr_reclaimed)
6236 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
6237 }
6238
6239 /*
6240 * This is the direct reclaim path, for page-allocating processes. We only
6241 * try to reclaim pages from zones which will satisfy the caller's allocation
6242 * request.
6243 *
6244 * If a zone is deemed to be full of pinned pages then just give it a light
6245 * scan then give up on it.
6246 */
6247 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
6248 {
6249 struct zoneref *z;
6250 struct zone *zone;
6251 unsigned long nr_soft_reclaimed;
6252 unsigned long nr_soft_scanned;
6253 gfp_t orig_mask;
6254 pg_data_t *last_pgdat = NULL;
6255 pg_data_t *first_pgdat = NULL;
6256
6257 /*
6258 * If the number of buffer_heads in the machine exceeds the maximum
6259 * allowed level, force direct reclaim to scan the highmem zone as
6260 * highmem pages could be pinning lowmem pages storing buffer_heads
6261 */
6262 orig_mask = sc->gfp_mask;
6263 if (buffer_heads_over_limit) {
6264 sc->gfp_mask |= __GFP_HIGHMEM;
6265 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
6266 }
6267
6268 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6269 sc->reclaim_idx, sc->nodemask) {
6270 /*
6271 * Take care that memory controller reclaiming has only a small
6272 * influence on the global LRU.
6273 */
6274 if (!cgroup_reclaim(sc)) {
6275 if (!cpuset_zone_allowed(zone,
6276 GFP_KERNEL | __GFP_HARDWALL))
6277 continue;
6278
6279 /*
6280 * If we already have plenty of memory free for
6281 * compaction in this zone, don't free any more.
6282 * Even though compaction is invoked for any
6283 * non-zero order, only frequent costly order
6284 * reclamation is disruptive enough to become a
6285 * noticeable problem, like transparent huge
6286 * page allocations.
6287 */
6288 if (IS_ENABLED(CONFIG_COMPACTION) &&
6289 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
6290 compaction_ready(zone, sc)) {
6291 sc->compaction_ready = true;
6292 continue;
6293 }
6294
6295 /*
6296 * Shrink each node in the zonelist once. If the
6297 * zonelist is ordered by zone (not the default) then a
6298 * node may be shrunk multiple times but in that case
6299 * the user prefers lower zones being preserved.
6300 */
6301 if (zone->zone_pgdat == last_pgdat)
6302 continue;
6303
6304 /*
6305 * This steals pages from memory cgroups over softlimit
6306 * and returns the number of reclaimed pages and
6307 * scanned pages. This works for global memory pressure
6308 * and balancing, not for a memcg's limit.
6309 */
6310 nr_soft_scanned = 0;
6311 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat,
6312 sc->order, sc->gfp_mask,
6313 &nr_soft_scanned);
6314 sc->nr_reclaimed += nr_soft_reclaimed;
6315 sc->nr_scanned += nr_soft_scanned;
6316 /* need some check to avoid further shrink_node() calls */
6317 }
6318
6319 if (!first_pgdat)
6320 first_pgdat = zone->zone_pgdat;
6321
6322 /* See comment about same check for global reclaim above */
6323 if (zone->zone_pgdat == last_pgdat)
6324 continue;
6325 last_pgdat = zone->zone_pgdat;
6326 shrink_node(zone->zone_pgdat, sc);
6327 }
6328
6329 if (first_pgdat)
6330 consider_reclaim_throttle(first_pgdat, sc);
6331
6332 /*
6333 * Restore to original mask to avoid the impact on the caller if we
6334 * promoted it to __GFP_HIGHMEM.
6335 */
6336 sc->gfp_mask = orig_mask;
6337 }
6338
6339 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
6340 {
6341 struct lruvec *target_lruvec;
6342 unsigned long refaults;
6343
6344 if (lru_gen_enabled())
6345 return;
6346
6347 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
6348 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
6349 target_lruvec->refaults[WORKINGSET_ANON] = refaults;
6350 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
6351 target_lruvec->refaults[WORKINGSET_FILE] = refaults;
6352 }
6353
6354 /*
6355 * This is the main entry point to direct page reclaim.
6356 *
6357 * If a full scan of the inactive list fails to free enough memory then we
6358 * are "out of memory" and something needs to be killed.
6359 *
6360 * If the caller is !__GFP_FS then the probability of a failure is reasonably
6361 * high - the zone may be full of dirty or under-writeback pages, which this
6362 * caller can't do much about. We kick the writeback threads and take explicit
6363 * naps in the hope that some of these pages can be written. But if the
6364 * allocating task holds filesystem locks which prevent writeout this might not
6365 * work, and the allocation attempt will fail.
6366 *
6367 * returns: 0, if no pages reclaimed
6368 * else, the number of pages reclaimed
6369 */
6370 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
6371 struct scan_control *sc)
6372 {
6373 int initial_priority = sc->priority;
6374 pg_data_t *last_pgdat;
6375 struct zoneref *z;
6376 struct zone *zone;
6377 retry:
6378 delayacct_freepages_start();
6379
6380 if (!cgroup_reclaim(sc))
6381 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
6382
6383 do {
6384 if (!sc->proactive)
6385 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
6386 sc->priority);
6387 sc->nr_scanned = 0;
6388 shrink_zones(zonelist, sc);
6389
6390 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
6391 break;
6392
6393 if (sc->compaction_ready)
6394 break;
6395
6396 /*
6397 * If we're getting trouble reclaiming, start doing
6398 * writepage even in laptop mode.
6399 */
6400 if (sc->priority < DEF_PRIORITY - 2)
6401 sc->may_writepage = 1;
6402 } while (--sc->priority >= 0);
6403
6404 last_pgdat = NULL;
6405 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
6406 sc->nodemask) {
6407 if (zone->zone_pgdat == last_pgdat)
6408 continue;
6409 last_pgdat = zone->zone_pgdat;
6410
6411 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
6412
6413 if (cgroup_reclaim(sc)) {
6414 struct lruvec *lruvec;
6415
6416 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
6417 zone->zone_pgdat);
6418 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
6419 }
6420 }
6421
6422 delayacct_freepages_end();
6423
6424 if (sc->nr_reclaimed)
6425 return sc->nr_reclaimed;
6426
6427 /* Aborted reclaim to try compaction? don't OOM, then */
6428 if (sc->compaction_ready)
6429 return 1;
6430
6431 /*
6432 * In most cases, direct reclaimers can do partial walks
6433 * through the cgroup tree to meet the reclaim goal while
6434 * keeping latency low. Since the iterator state is shared
6435 * among all direct reclaim invocations (to retain fairness
6436 * among cgroups), though, high concurrency can result in
6437 * individual threads not seeing enough cgroups to make
6438 * meaningful forward progress. Avoid false OOMs in this case.
6439 */
6440 if (!sc->memcg_full_walk) {
6441 sc->priority = initial_priority;
6442 sc->memcg_full_walk = 1;
6443 goto retry;
6444 }
6445
6446 /*
6447 * We make inactive:active ratio decisions based on the node's
6448 * composition of memory, but a restrictive reclaim_idx or a
6449 * memory.low cgroup setting can exempt large amounts of
6450 * memory from reclaim. Neither of which are very common, so
6451 * instead of doing costly eligibility calculations of the
6452 * entire cgroup subtree up front, we assume the estimates are
6453 * good, and retry with forcible deactivation if that fails.
6454 */
6455 if (sc->skipped_deactivate) {
6456 sc->priority = initial_priority;
6457 sc->force_deactivate = 1;
6458 sc->skipped_deactivate = 0;
6459 goto retry;
6460 }
6461
6462 /* Untapped cgroup reserves? Don't OOM, retry. */
6463 if (sc->memcg_low_skipped) {
6464 sc->priority = initial_priority;
6465 sc->force_deactivate = 0;
6466 sc->memcg_low_reclaim = 1;
6467 sc->memcg_low_skipped = 0;
6468 goto retry;
6469 }
6470
6471 return 0;
6472 }
6473
6474 static bool allow_direct_reclaim(pg_data_t *pgdat)
6475 {
6476 struct zone *zone;
6477 unsigned long pfmemalloc_reserve = 0;
6478 unsigned long free_pages = 0;
6479 int i;
6480 bool wmark_ok;
6481
6482 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6483 return true;
6484
6485 for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
6486 if (!zone_reclaimable_pages(zone))
6487 continue;
6488
6489 pfmemalloc_reserve += min_wmark_pages(zone);
6490 free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES);
6491 }
6492
6493 /* If there are no reserves (unexpected config) then do not throttle */
6494 if (!pfmemalloc_reserve)
6495 return true;
6496
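/*
 * Direct reclaim is throttled once free pages drop below half of the
 * combined min watermarks of the usable (<= ZONE_NORMAL) zones summed above.
 */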
6497 wmark_ok = free_pages > pfmemalloc_reserve / 2;
6498
6499 /* kswapd must be awake if processes are being throttled */
6500 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
6501 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
6502 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
6503
6504 wake_up_interruptible(&pgdat->kswapd_wait);
6505 }
6506
6507 return wmark_ok;
6508 }
6509
6510 /*
6511 * Throttle direct reclaimers if backing storage is backed by the network
6512 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
6513 * depleted. kswapd will continue to make progress and wake the processes
6514 * when the low watermark is reached.
6515 *
6516 * Returns true if a fatal signal was delivered during throttling. If this
6517 * happens, the page allocator should not consider triggering the OOM killer.
6518 */
6519 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
6520 nodemask_t *nodemask)
6521 {
6522 struct zoneref *z;
6523 struct zone *zone;
6524 pg_data_t *pgdat = NULL;
6525
6526 /*
6527 * Kernel threads should not be throttled as they may be indirectly
6528 * responsible for cleaning pages necessary for reclaim to make forward
6529 * progress. kjournald for example may enter direct reclaim while
6530 * committing a transaction where throttling it could force other
6531 * processes to block on log_wait_commit().
6532 */
6533 if (current->flags & PF_KTHREAD)
6534 goto out;
6535
6536 /*
6537 * If a fatal signal is pending, this process should not throttle.
6538 * It should return quickly so it can exit and free its memory.
6539 */
6540 if (fatal_signal_pending(current))
6541 goto out;
6542
6543 /*
6544 * Check if the pfmemalloc reserves are ok by finding the first node
6545 * with a usable ZONE_NORMAL or lower zone. The expectation is that
6546 * GFP_KERNEL will be required for allocating network buffers when
6547 * swapping over the network so ZONE_HIGHMEM is unusable.
6548 *
6549 * Throttling is based on the first usable node and throttled processes
6550 * wait on a queue until kswapd makes progress and wakes them. There
6551 * is an affinity then between processes waking up and where reclaim
6552 * progress has been made assuming the process wakes on the same node.
6553 * More importantly, processes running on remote nodes will not compete
6554 * for remote pfmemalloc reserves and processes on different nodes
6555 * should make reasonable progress.
6556 */
6557 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6558 gfp_zone(gfp_mask), nodemask) {
6559 if (zone_idx(zone) > ZONE_NORMAL)
6560 continue;
6561
6562 /* Throttle based on the first usable node */
6563 pgdat = zone->zone_pgdat;
6564 if (allow_direct_reclaim(pgdat))
6565 goto out;
6566 break;
6567 }
6568
6569 /* If no zone was usable by the allocation flags then do not throttle */
6570 if (!pgdat)
6571 goto out;
6572
6573 /* Account for the throttling */
6574 count_vm_event(PGSCAN_DIRECT_THROTTLE);
6575
6576 /*
6577 * If the caller cannot enter the filesystem, it's possible that it
6578 * is due to the caller holding an FS lock or performing a journal
6579 * transaction in the case of a filesystem like ext[3|4]. In this case,
6580 * it is not safe to block on pfmemalloc_wait as kswapd could be
6581 * blocked waiting on the same lock. Instead, throttle for up to a
6582 * second before continuing.
6583 */
6584 if (!(gfp_mask & __GFP_FS))
6585 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
6586 allow_direct_reclaim(pgdat), HZ);
6587 else
6588 /* Throttle until kswapd wakes the process */
6589 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
6590 allow_direct_reclaim(pgdat));
6591
6592 if (fatal_signal_pending(current))
6593 return true;
6594
6595 out:
6596 return false;
6597 }
6598
6599 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
6600 gfp_t gfp_mask, nodemask_t *nodemask)
6601 {
6602 unsigned long nr_reclaimed;
6603 struct scan_control sc = {
6604 .nr_to_reclaim = SWAP_CLUSTER_MAX,
6605 .gfp_mask = current_gfp_context(gfp_mask),
6606 .reclaim_idx = gfp_zone(gfp_mask),
6607 .order = order,
6608 .nodemask = nodemask,
6609 .priority = DEF_PRIORITY,
6610 .may_writepage = !laptop_mode,
6611 .may_unmap = 1,
6612 .may_swap = 1,
6613 };
6614
6615 /*
6616 * scan_control uses s8 fields for order, priority, and reclaim_idx.
6617 * Confirm they are large enough for max values.
6618 */
6619 BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX);
6620 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
6621 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
6622
6623 /*
6624 * Do not enter reclaim if fatal signal was delivered while throttled.
6625 * 1 is returned so that the page allocator does not OOM kill at this
6626 * point.
6627 */
6628 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
6629 return 1;
6630
6631 set_task_reclaim_state(current, &sc.reclaim_state);
6632 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
6633
6634 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6635
6636 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
6637 set_task_reclaim_state(current, NULL);
6638
6639 return nr_reclaimed;
6640 }
6641
6642 #ifdef CONFIG_MEMCG
6643
6644 /* Only used by soft limit reclaim. Do not reuse for anything else. */
6645 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
6646 gfp_t gfp_mask, bool noswap,
6647 pg_data_t *pgdat,
6648 unsigned long *nr_scanned)
6649 {
6650 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6651 struct scan_control sc = {
6652 .nr_to_reclaim = SWAP_CLUSTER_MAX,
6653 .target_mem_cgroup = memcg,
6654 .may_writepage = !laptop_mode,
6655 .may_unmap = 1,
6656 .reclaim_idx = MAX_NR_ZONES - 1,
6657 .may_swap = !noswap,
6658 };
6659
6660 WARN_ON_ONCE(!current->reclaim_state);
6661
6662 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
6663 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
6664
6665 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
6666 sc.gfp_mask);
6667
6668 /*
6669 * NOTE: Although we can get the priority field, using it
6670 * here is not a good idea, since it limits the pages we can scan.
6671 * If we don't reclaim here, the shrink_node from balance_pgdat
6672 * will pick up pages from other mem cgroups as well. We hack
6673 * the priority and make it zero.
6674 */
6675 shrink_lruvec(lruvec, &sc);
6676
6677 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
6678
6679 *nr_scanned = sc.nr_scanned;
6680
6681 return sc.nr_reclaimed;
6682 }
6683
6684 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6685 unsigned long nr_pages,
6686 gfp_t gfp_mask,
6687 unsigned int reclaim_options,
6688 int *swappiness)
6689 {
6690 unsigned long nr_reclaimed;
6691 unsigned int noreclaim_flag;
6692 struct scan_control sc = {
6693 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6694 .proactive_swappiness = swappiness,
6695 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
6696 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
6697 .reclaim_idx = MAX_NR_ZONES - 1,
6698 .target_mem_cgroup = memcg,
6699 .priority = DEF_PRIORITY,
6700 .may_writepage = !laptop_mode,
6701 .may_unmap = 1,
6702 .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
6703 .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
6704 };
6705 /*
6706 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
6707 * equal pressure on all the nodes. This is based on the assumption that
6708 * the reclaim does not bail out early.
6709 */
6710 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6711
6712 set_task_reclaim_state(current, &sc.reclaim_state);
6713 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
6714 noreclaim_flag = memalloc_noreclaim_save();
6715
6716 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6717
6718 memalloc_noreclaim_restore(noreclaim_flag);
6719 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
6720 set_task_reclaim_state(current, NULL);
6721
6722 return nr_reclaimed;
6723 }
6724 #endif
6725
6726 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
6727 {
6728 struct mem_cgroup *memcg;
6729 struct lruvec *lruvec;
6730
6731 if (lru_gen_enabled()) {
6732 lru_gen_age_node(pgdat, sc);
6733 return;
6734 }
6735
6736 lruvec = mem_cgroup_lruvec(NULL, pgdat);
6737 if (!can_age_anon_pages(lruvec, sc))
6738 return;
6739
6740 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6741 return;
6742
6743 memcg = mem_cgroup_iter(NULL, NULL, NULL);
6744 do {
6745 lruvec = mem_cgroup_lruvec(memcg, pgdat);
6746 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
6747 sc, LRU_ACTIVE_ANON);
6748 memcg = mem_cgroup_iter(NULL, memcg, NULL);
6749 } while (memcg);
6750 }
6751
6752 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
6753 {
6754 int i;
6755 struct zone *zone;
6756
6757 /*
6758 * Check for watermark boosts top-down as the higher zones
6759 * are more likely to be boosted. Both watermarks and boosts
6760 * should not be checked at the same time as reclaim would
6761 * start prematurely when there is no boosting and a lower
6762 * zone is balanced.
6763 */
6764 for (i = highest_zoneidx; i >= 0; i--) {
6765 zone = pgdat->node_zones + i;
6766 if (!managed_zone(zone))
6767 continue;
6768
6769 if (zone->watermark_boost)
6770 return true;
6771 }
6772
6773 return false;
6774 }
6775
6776 /*
6777 * Returns true if there is an eligible zone balanced for the request order
6778 * and highest_zoneidx
6779 */
6780 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
6781 {
6782 int i;
6783 unsigned long mark = -1;
6784 struct zone *zone;
6785
6786 /*
6787 * Check watermarks bottom-up as lower zones are more likely to
6788 * meet watermarks.
6789 */
6790 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
6791 enum zone_stat_item item;
6792 unsigned long free_pages;
6793
6794 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
6795 mark = promo_wmark_pages(zone);
6796 else
6797 mark = high_wmark_pages(zone);
6798
6799 /*
6800 * In defrag_mode, watermarks must be met in whole
6801 * blocks to avoid polluting allocator fallbacks.
6802 *
6803 * However, kswapd usually cannot accomplish this on
6804 * its own and needs kcompactd support. Once it's
6805 * reclaimed a compaction gap, and kswapd_shrink_node
6806 * has dropped order, simply ensure there are enough
6807 * base pages for compaction, wake kcompactd & sleep.
6808 */
6809 if (defrag_mode && order)
6810 item = NR_FREE_PAGES_BLOCKS;
6811 else
6812 item = NR_FREE_PAGES;
6813
6814 /*
6815 * When there is a high number of CPUs in the system,
6816 * the cumulative error from the vmstat per-cpu cache
6817 * can blur the line between the watermarks. In that
6818 * case, be safe and get an accurate snapshot.
6819 *
6820 * TODO: NR_FREE_PAGES_BLOCKS moves in steps of
6821 * pageblock_nr_pages, while the vmstat pcp threshold
6822 * is limited to 125. On many configurations that
6823 * counter won't actually be per-cpu cached. But keep
6824 * things simple for now; revisit when somebody cares.
6825 */
6826 free_pages = zone_page_state(zone, item);
6827 if (zone->percpu_drift_mark && free_pages < zone->percpu_drift_mark)
6828 free_pages = zone_page_state_snapshot(zone, item);
6829
6830 if (__zone_watermark_ok(zone, order, mark, highest_zoneidx,
6831 0, free_pages))
6832 return true;
6833 }
6834
6835 /*
6836 * If a node has no managed zone within highest_zoneidx, it does not
6837 * need balancing by definition. This can happen if a zone-restricted
6838 * allocation tries to wake a remote kswapd.
6839 */
6840 if (mark == -1)
6841 return true;
6842
6843 return false;
6844 }
6845
6846 /* Clear pgdat state for congested, dirty or under writeback. */
6847 static void clear_pgdat_congested(pg_data_t *pgdat)
6848 {
6849 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
6850
6851 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
6852 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
6853 clear_bit(PGDAT_DIRTY, &pgdat->flags);
6854 clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
6855 }
6856
6857 /*
6858 * Prepare kswapd for sleeping. This verifies that there are no processes
6859 * waiting in throttle_direct_reclaim() and that watermarks have been met.
6860 *
6861 * Returns true if kswapd is ready to sleep
6862 */
6863 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
6864 int highest_zoneidx)
6865 {
6866 /*
6867 * The throttled processes are normally woken up in balance_pgdat() as
6868 * soon as allow_direct_reclaim() is true. But there is a potential
6869 * race between when kswapd checks the watermarks and a process gets
6870 * throttled. There is also a potential race if processes get
6871 * throttled, kswapd wakes, a large process exits thereby balancing the
6872 * zones, which causes kswapd to exit balance_pgdat() before reaching
6873 * the wake up checks. If kswapd is going to sleep, no process should
6874 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
6875 * the wake up is premature, processes will wake kswapd and get
6876 * throttled again. The difference from wake ups in balance_pgdat() is
6877 * that here we are under prepare_to_wait().
6878 */
6879 if (waitqueue_active(&pgdat->pfmemalloc_wait))
6880 wake_up_all(&pgdat->pfmemalloc_wait);
6881
6882 /* Hopeless node, leave it to direct reclaim */
6883 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6884 return true;
6885
6886 if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
6887 clear_pgdat_congested(pgdat);
6888 return true;
6889 }
6890
6891 return false;
6892 }
6893
6894 /*
6895 * kswapd shrinks a node of pages that are at or below the highest usable
6896 * zone that is currently unbalanced.
6897 *
6898 * Returns true if kswapd scanned at least the requested number of pages to
6899 * reclaim or if the lack of progress was due to pages under writeback.
6900 * This is used to determine if the scanning priority needs to be raised.
6901 */
6902 static bool kswapd_shrink_node(pg_data_t *pgdat,
6903 struct scan_control *sc)
6904 {
6905 struct zone *zone;
6906 int z;
6907 unsigned long nr_reclaimed = sc->nr_reclaimed;
6908
6909 /* Reclaim a number of pages proportional to the number of zones */
6910 sc->nr_to_reclaim = 0;
6911 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
6912 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
6913 }
6914
6915 /*
6916 * Historically care was taken to put equal pressure on all zones but
6917 * now pressure is applied based on node LRU order.
6918 */
6919 shrink_node(pgdat, sc);
6920
6921 /*
6922 * Fragmentation may mean that the system cannot be rebalanced for
6923 * high-order allocations. If twice the allocation size has been
6924 * reclaimed then recheck watermarks only at order-0 to prevent
6925 * excessive reclaim. Assume that a process that requested a
6926 * high-order allocation can direct reclaim/compact instead.
6927 */
6928 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
6929 sc->order = 0;
6930
6931 /* account for progress from mm_account_reclaimed_pages() */
6932 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim;
6933 }
6934
6935 /* Page allocator PCP high watermark is lowered if reclaim is active. */
6936 static inline void
6937 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
6938 {
6939 int i;
6940 struct zone *zone;
6941
6942 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
6943 if (active)
6944 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
6945 else
6946 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
6947 }
6948 }
6949
6950 static inline void
6951 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6952 {
6953 update_reclaim_active(pgdat, highest_zoneidx, true);
6954 }
6955
6956 static inline void
6957 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6958 {
6959 update_reclaim_active(pgdat, highest_zoneidx, false);
6960 }
6961
6962 /*
6963 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
6964 * that are eligible for use by the caller until at least one zone is
6965 * balanced.
6966 *
6967 * Returns the order kswapd finished reclaiming at.
6968 *
6969 * kswapd scans the zones in the highmem->normal->dma direction. It skips
6970 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
6971 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
6972 * or lower is eligible for reclaim until at least one usable zone is
6973 * balanced.
6974 */
6975 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
6976 {
6977 int i;
6978 unsigned long nr_soft_reclaimed;
6979 unsigned long nr_soft_scanned;
6980 unsigned long pflags;
6981 unsigned long nr_boost_reclaim;
6982 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
6983 bool boosted;
6984 struct zone *zone;
6985 struct scan_control sc = {
6986 .gfp_mask = GFP_KERNEL,
6987 .order = order,
6988 .may_unmap = 1,
6989 };
6990
6991 set_task_reclaim_state(current, &sc.reclaim_state);
6992 psi_memstall_enter(&pflags);
6993 __fs_reclaim_acquire(_THIS_IP_);
6994
6995 count_vm_event(PAGEOUTRUN);
6996
6997 /*
6998 * Account for the reclaim boost. Note that the zone boost is left in
6999 * place so that parallel allocations that are near the watermark will
7000 * stall or direct reclaim until kswapd is finished.
7001 */
7002 nr_boost_reclaim = 0;
7003 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
7004 nr_boost_reclaim += zone->watermark_boost;
7005 zone_boosts[i] = zone->watermark_boost;
7006 }
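/*
 * Remember whether any zone started with a nonzero watermark boost; this is
 * used after the reclaim loop to clear the leftover boosts and wake kcompactd.
 */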
7007 boosted = nr_boost_reclaim;
7008
7009 restart:
7010 set_reclaim_active(pgdat, highest_zoneidx);
7011 sc.priority = DEF_PRIORITY;
7012 do {
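/* Snapshot the running total so this iteration's progress can be measured */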
7013 unsigned long nr_reclaimed = sc.nr_reclaimed;
7014 bool raise_priority = true;
7015 bool balanced;
7016 bool ret;
7017 bool was_frozen;
7018
7019 sc.reclaim_idx = highest_zoneidx;
7020
7021 /*
7022 * If the number of buffer_heads exceeds the maximum allowed
7023 * then consider reclaiming from all zones. This has a dual
7024 * purpose -- on 64-bit systems it is expected that
7025 * buffer_heads are stripped during active rotation. On 32-bit
7026 * systems, highmem pages can pin lowmem memory and shrinking
7027 * buffers can relieve lowmem pressure. Reclaim may still not
7028 * go ahead if all eligible zones for the original allocation
7029 * request are balanced to avoid excessive reclaim from kswapd.
7030 */
7031 if (buffer_heads_over_limit) {
7032 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
7033 zone = pgdat->node_zones + i;
7034 if (!managed_zone(zone))
7035 continue;
7036
7037 sc.reclaim_idx = i;
7038 break;
7039 }
7040 }
7041
7042 /*
7043 * If the pgdat is imbalanced then ignore boosting and preserve
7044 * the watermarks for a later time and restart. Note that the
7045 * zone watermarks will still be reset at the end of balancing
7046 * on the grounds that the normal reclaim should be enough to
7047 * re-evaluate if boosting is required when kswapd next wakes.
7048 */
7049 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
7050 if (!balanced && nr_boost_reclaim) {
7051 nr_boost_reclaim = 0;
7052 goto restart;
7053 }
7054
7055 /*
7056 * If boosting is not active then only reclaim if there are no
7057 * eligible zones. Note that sc.reclaim_idx is not used as
7058 * buffer_heads_over_limit may have adjusted it.
7059 */
7060 if (!nr_boost_reclaim && balanced)
7061 goto out;
7062
7063 /* Limit the priority of boosting to avoid reclaim writeback */
7064 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
7065 raise_priority = false;
7066
7067 /*
7068 * Do not writeback or swap pages for boosted reclaim. The
7069 * intent is to relieve pressure not issue sub-optimal IO
7070 * from reclaim context. If no pages are reclaimed, the
7071 * reclaim will be aborted.
7072 */
7073 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
7074 sc.may_swap = !nr_boost_reclaim;
7075
7076 /*
7077 * Do some background aging, to give pages a chance to be
7078 * referenced before reclaiming. All pages are rotated
7079 * regardless of classzone as this is about consistent aging.
7080 */
7081 kswapd_age_node(pgdat, &sc);
7082
7083 /*
7084 * If we're getting trouble reclaiming, start doing writepage
7085 * even in laptop mode.
7086 */
7087 if (sc.priority < DEF_PRIORITY - 2)
7088 sc.may_writepage = 1;
7089
7090 /* Call soft limit reclaim before calling shrink_node. */
7091 sc.nr_scanned = 0;
7092 nr_soft_scanned = 0;
7093 nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order,
7094 sc.gfp_mask, &nr_soft_scanned);
7095 sc.nr_reclaimed += nr_soft_reclaimed;
7096
7097 /*
7098 * There should be no need to raise the scanning priority if
7099 * enough pages are already being scanned that the high
7100 * watermark would be met at 100% efficiency.
7101 */
7102 if (kswapd_shrink_node(pgdat, &sc))
7103 raise_priority = false;
7104
7105 /*
7106 * If the low watermark is met there is no need for processes
7107 * to be throttled on pfmemalloc_wait as they should now be
7108 * able to safely make forward progress. Wake them.
7109 */
7110 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
7111 allow_direct_reclaim(pgdat))
7112 wake_up_all(&pgdat->pfmemalloc_wait);
7113
7114 /* Check if kswapd should be suspending */
7115 __fs_reclaim_release(_THIS_IP_);
7116 ret = kthread_freezable_should_stop(&was_frozen);
7117 __fs_reclaim_acquire(_THIS_IP_);
7118 if (was_frozen || ret)
7119 break;
7120
7121 /*
7122 * Raise priority if scanning rate is too low or there was no
7123 * progress in reclaiming pages
7124 */
7125 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
7126 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
7127
7128 /*
7129 * If reclaim made no progress for a boost, stop reclaim as
7130 * IO cannot be queued and it could be an infinite loop in
7131 * extreme circumstances.
7132 */
7133 if (nr_boost_reclaim && !nr_reclaimed)
7134 break;
7135
7136 if (raise_priority || !nr_reclaimed)
7137 sc.priority--;
7138 } while (sc.priority >= 1);
7139
7140 /*
7141 * Restart only if it went through the priority loop all the way,
7142 * but cache_trim_mode didn't work.
7143 */
7144 if (!sc.nr_reclaimed && sc.priority < 1 &&
7145 !sc.no_cache_trim_mode && sc.cache_trim_mode_failed) {
7146 sc.no_cache_trim_mode = 1;
7147 goto restart;
7148 }
7149
7150 if (!sc.nr_reclaimed)
7151 pgdat->kswapd_failures++;
7152
7153 out:
7154 clear_reclaim_active(pgdat, highest_zoneidx);
7155
7156 /* If reclaim was boosted, account for the reclaim done in this pass */
7157 if (boosted) {
7158 unsigned long flags;
7159
7160 for (i = 0; i <= highest_zoneidx; i++) {
7161 if (!zone_boosts[i])
7162 continue;
7163
7164 /* Increments are under the zone lock */
7165 zone = pgdat->node_zones + i;
7166 spin_lock_irqsave(&zone->lock, flags);
7167 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
7168 spin_unlock_irqrestore(&zone->lock, flags);
7169 }
7170
7171 /*
7172 * As there is now likely space, wake up kcompactd to defragment
7173 * pageblocks.
7174 */
7175 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
7176 }
7177
7178 snapshot_refaults(NULL, pgdat);
7179 __fs_reclaim_release(_THIS_IP_);
7180 psi_memstall_leave(&pflags);
7181 set_task_reclaim_state(current, NULL);
7182
7183 /*
7184 * Return the order kswapd stopped reclaiming at as
7185 * prepare_kswapd_sleep() takes it into account. If another caller
7186 * entered the allocator slow path while kswapd was awake, order will
7187 * remain at the higher level.
7188 */
7189 return sc.order;
7190 }
7191
7192 /*
7193 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7194 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is
7195 * not a valid index then either kswapd is running for the first time or it couldn't
7196 * sleep after the previous reclaim attempt (the node is still unbalanced). In that
7197 * case return the zone index of the previous kswapd reclaim cycle.
7198 */
7199 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
7200 enum zone_type prev_highest_zoneidx)
7201 {
7202 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7203
7204 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
7205 }
7206
7207 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
7208 unsigned int highest_zoneidx)
7209 {
7210 long remaining = 0;
7211 DEFINE_WAIT(wait);
7212
7213 if (freezing(current) || kthread_should_stop())
7214 return;
7215
7216 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7217
7218 /*
7219 * Try to sleep for a short interval. Note that kcompactd will only be
7220 * woken if it is possible to sleep for a short interval. This is
7221 * deliberate on the assumption that if reclaim cannot keep an
7222 * eligible zone balanced that it's also unlikely that compaction will
7223 * succeed.
7224 */
7225 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7226 /*
7227 * Compaction records what page blocks it recently failed to
7228 * isolate pages from and skips them in the future scanning.
7229 * When kswapd is going to sleep, it is reasonable to assume
7230 * that compaction may now succeed, so reset the cache.
7231 */
7232 reset_isolation_suitable(pgdat);
7233
7234 /*
7235 * We have freed the memory, now we should compact it to make
7236 * allocation of the requested order possible.
7237 */
7238 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
7239
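/*
 * Sleep for up to 100ms; a non-zero return value means kswapd was
 * woken before the timeout expired.
 */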
7240 remaining = schedule_timeout(HZ/10);
7241
7242 /*
7243 * If woken prematurely then reset kswapd_highest_zoneidx and
7244 * order. The values will either be from a wakeup request or
7245 * the previous request that slept prematurely.
7246 */
7247 if (remaining) {
7248 WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
7249 kswapd_highest_zoneidx(pgdat,
7250 highest_zoneidx));
7251
7252 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
7253 WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
7254 }
7255
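/* Re-arm the wait entry before deciding whether to sleep for real */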
7256 finish_wait(&pgdat->kswapd_wait, &wait);
7257 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7258 }
7259
7260 /*
7261 * After a short sleep, check if it was a premature sleep. If not, then
7262 * go fully to sleep until explicitly woken up.
7263 */
7264 if (!remaining &&
7265 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7266 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
7267
7268 /*
7269 * vmstat counters are not perfectly accurate and the estimated
7270 * value for counters such as NR_FREE_PAGES can deviate from the
7271 * true value by nr_online_cpus * threshold. To avoid the zone
7272 * watermarks being breached while under pressure, we reduce the
7273 * per-cpu vmstat threshold while kswapd is awake and restore
7274 * them before going back to sleep.
7275 */
7276 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
7277
7278 if (!kthread_should_stop())
7279 schedule();
7280
7281 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
7282 } else {
7283 if (remaining)
7284 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
7285 else
7286 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
7287 }
7288 finish_wait(&pgdat->kswapd_wait, &wait);
7289 }
7290
7291 /*
7292 * The background pageout daemon, started as a kernel thread
7293 * from the init process.
7294 *
7295 * This basically trickles out pages so that we have _some_
7296 * free memory available even if there is no other activity
7297 * that frees anything up. This is needed for things like routing
7298 * etc, where we otherwise might have all activity going on in
7299 * asynchronous contexts that cannot page things out.
7300 *
7301 * If there are applications that are active memory-allocators
7302 * (most normal use), this basically shouldn't matter.
7303 */
7304 static int kswapd(void *p)
7305 {
7306 unsigned int alloc_order, reclaim_order;
7307 unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
7308 pg_data_t *pgdat = (pg_data_t *)p;
7309 struct task_struct *tsk = current;
7310
7311 /*
7312 * Tell the memory management that we're a "memory allocator",
7313 * and that if we need more memory we should get access to it
7314 * regardless (see "__alloc_pages()"). "kswapd" should
7315 * never get caught in the normal page freeing logic.
7316 *
7317 * (Kswapd normally doesn't need memory anyway, but sometimes
7318 * you need a small amount of memory in order to be able to
7319 * page out something else, and this flag essentially protects
7320 * us from recursively trying to free more memory as we're
7321 * trying to free the first piece of memory in the first place).
7322 */
7323 tsk->flags |= PF_MEMALLOC | PF_KSWAPD;
7324 set_freezable();
7325
7326 WRITE_ONCE(pgdat->kswapd_order, 0);
7327 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7328 atomic_set(&pgdat->nr_writeback_throttled, 0);
7329 for ( ; ; ) {
7330 bool was_frozen;
7331
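/* Read the order/zoneidx requested by the most recent waker, if any */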
7332 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
7333 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7334 highest_zoneidx);
7335
7336 kswapd_try_sleep:
7337 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
7338 highest_zoneidx);
7339
7340 /* Read the new order and highest_zoneidx */
7341 alloc_order = READ_ONCE(pgdat->kswapd_order);
7342 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7343 highest_zoneidx);
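/*
 * Reset the pending request now that it has been read, so a wakeup
 * that arrives while kswapd is reclaiming records fresh values.
 */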
7344 WRITE_ONCE(pgdat->kswapd_order, 0);
7345 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7346
7347 if (kthread_freezable_should_stop(&was_frozen))
7348 break;
7349
7350 /*
7351 * We can speed up thawing tasks if we don't call balance_pgdat
7352 * after returning from the refrigerator
7353 */
7354 if (was_frozen)
7355 continue;
7356
7357 /*
7358 * Reclaim begins at the requested order but if a high-order
7359 * reclaim fails then kswapd falls back to reclaiming for
7360 * order-0. If that happens, kswapd will consider sleeping
7361 * for the order it finished reclaiming at (reclaim_order)
7362 * but kcompactd is woken to compact for the original
7363 * request (alloc_order).
7364 */
7365 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
7366 alloc_order);
7367 reclaim_order = balance_pgdat(pgdat, alloc_order,
7368 highest_zoneidx);
7369 if (reclaim_order < alloc_order)
7370 goto kswapd_try_sleep;
7371 }
7372
7373 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
7374
7375 return 0;
7376 }
7377
7378 /*
7379 * A zone is low on free memory or too fragmented for high-order memory. If
7380 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
7381 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
7382 * has failed or is not needed, still wake up kcompactd if only compaction is
7383 * needed.
7384 */
7385 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
7386 enum zone_type highest_zoneidx)
7387 {
7388 pg_data_t *pgdat;
7389 enum zone_type curr_idx;
7390
7391 if (!managed_zone(zone))
7392 return;
7393
7394 if (!cpuset_zone_allowed(zone, gfp_flags))
7395 return;
7396
7397 pgdat = zone->zone_pgdat;
7398 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7399
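/* Record the largest order and highest zone index requested so far */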
7400 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
7401 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
7402
7403 if (READ_ONCE(pgdat->kswapd_order) < order)
7404 WRITE_ONCE(pgdat->kswapd_order, order);
7405
7406 if (!waitqueue_active(&pgdat->kswapd_wait))
7407 return;
7408
7409 /* Hopeless node, leave it to direct reclaim if possible */
7410 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
7411 (pgdat_balanced(pgdat, order, highest_zoneidx) &&
7412 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
7413 /*
7414 * There may be plenty of free memory available, but it's too
7415 * fragmented for high-order allocations. Wake up kcompactd
7416 * and rely on compaction_suitable() to determine if it's
7417 * needed. If it fails, it will defer subsequent attempts to
7418 * ratelimit its work.
7419 */
7420 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
7421 wakeup_kcompactd(pgdat, order, highest_zoneidx);
7422 return;
7423 }
7424
7425 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
7426 gfp_flags);
7427 wake_up_interruptible(&pgdat->kswapd_wait);
7428 }
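/*
 * Illustrative sketch (not part of this file): the page allocator slow path
 * typically wakes kswapd for every eligible zone in its zonelist, roughly
 * along the lines of:
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx, nodemask)
 *		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
 */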
7429
7430 #ifdef CONFIG_HIBERNATION
7431 /*
7432 * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
7433 * freed pages.
7434 *
7435 * Rather than trying to age LRUs the aim is to preserve the overall
7436 * LRU order by reclaiming preferentially
7437 * inactive > active > active referenced > active mapped
7438 */
7439 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
7440 {
7441 struct scan_control sc = {
7442 .nr_to_reclaim = nr_to_reclaim,
7443 .gfp_mask = GFP_HIGHUSER_MOVABLE,
7444 .reclaim_idx = MAX_NR_ZONES - 1,
7445 .priority = DEF_PRIORITY,
7446 .may_writepage = 1,
7447 .may_unmap = 1,
7448 .may_swap = 1,
7449 .hibernation_mode = 1,
7450 };
7451 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7452 unsigned long nr_reclaimed;
7453 unsigned int noreclaim_flag;
7454
7455 fs_reclaim_acquire(sc.gfp_mask);
7456 noreclaim_flag = memalloc_noreclaim_save();
7457 set_task_reclaim_state(current, &sc.reclaim_state);
7458
7459 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
7460
7461 set_task_reclaim_state(current, NULL);
7462 memalloc_noreclaim_restore(noreclaim_flag);
7463 fs_reclaim_release(sc.gfp_mask);
7464
7465 return nr_reclaimed;
7466 }
7467 #endif /* CONFIG_HIBERNATION */
7468
7469 /*
7470 * This kswapd start function will be called by init and node-hot-add.
7471 */
7472 void __meminit kswapd_run(int nid)
7473 {
7474 pg_data_t *pgdat = NODE_DATA(nid);
7475
7476 pgdat_kswapd_lock(pgdat);
7477 if (!pgdat->kswapd) {
7478 pgdat->kswapd = kthread_create_on_node(kswapd, pgdat, nid, "kswapd%d", nid);
7479 if (IS_ERR(pgdat->kswapd)) {
7480 /* failure at boot is fatal */
7481 pr_err("Failed to start kswapd on node %d,ret=%ld\n",
7482 nid, PTR_ERR(pgdat->kswapd));
7483 BUG_ON(system_state < SYSTEM_RUNNING);
7484 pgdat->kswapd = NULL;
7485 } else {
7486 wake_up_process(pgdat->kswapd);
7487 }
7488 }
7489 pgdat_kswapd_unlock(pgdat);
7490 }
7491
7492 /*
7493 * Called by memory hotplug when all memory in a node is offlined. Caller must
7494 * be holding mem_hotplug_begin/done().
7495 */
7496 void __meminit kswapd_stop(int nid)
7497 {
7498 pg_data_t *pgdat = NODE_DATA(nid);
7499 struct task_struct *kswapd;
7500
7501 pgdat_kswapd_lock(pgdat);
7502 kswapd = pgdat->kswapd;
7503 if (kswapd) {
7504 kthread_stop(kswapd);
7505 pgdat->kswapd = NULL;
7506 }
7507 pgdat_kswapd_unlock(pgdat);
7508 }
7509
7510 static const struct ctl_table vmscan_sysctl_table[] = {
7511 {
7512 .procname = "swappiness",
7513 .data = &vm_swappiness,
7514 .maxlen = sizeof(vm_swappiness),
7515 .mode = 0644,
7516 .proc_handler = proc_dointvec_minmax,
7517 .extra1 = SYSCTL_ZERO,
7518 .extra2 = SYSCTL_TWO_HUNDRED,
7519 },
7520 #ifdef CONFIG_NUMA
7521 {
7522 .procname = "zone_reclaim_mode",
7523 .data = &node_reclaim_mode,
7524 .maxlen = sizeof(node_reclaim_mode),
7525 .mode = 0644,
7526 .proc_handler = proc_dointvec_minmax,
7527 .extra1 = SYSCTL_ZERO,
7528 }
7529 #endif
7530 };
7531
7532 static int __init kswapd_init(void)
7533 {
7534 int nid;
7535
7536 swap_setup();
7537 for_each_node_state(nid, N_MEMORY)
7538 kswapd_run(nid);
7539 register_sysctl_init("vm", vmscan_sysctl_table);
7540 return 0;
7541 }
7542
7543 module_init(kswapd_init)
7544
7545 #ifdef CONFIG_NUMA
7546 /*
7547 * Node reclaim mode
7548 *
7549 * If non-zero call node_reclaim when the number of free pages falls below
7550 * the watermarks.
7551 */
7552 int node_reclaim_mode __read_mostly;
7553
7554 /*
7555 * Priority for NODE_RECLAIM. This determines the fraction of a node's
7556 * pages scanned in each node_reclaim pass. A priority of 4 scans
7557 * 1/16th of the node.
7558 */
7559 #define NODE_RECLAIM_PRIORITY 4
7560
7561 /*
7562 * Percentage of pages in a zone that must be unmapped for node_reclaim to
7563 * occur.
7564 */
7565 int sysctl_min_unmapped_ratio = 1;
7566
7567 /*
7568 * If the number of slab pages in a zone grows beyond this percentage then
7569 * slab reclaim needs to occur.
7570 */
7571 int sysctl_min_slab_ratio = 5;
7572
7573 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
7574 {
7575 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
7576 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
7577 node_page_state(pgdat, NR_ACTIVE_FILE);
7578
7579 /*
7580 * It's possible for there to be more file mapped pages than
7581 * accounted for by the pages on the file LRU lists because
7582 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
7583 */
7584 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
7585 }
7586
7587 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
7588 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
7589 {
7590 unsigned long nr_pagecache_reclaimable;
7591 unsigned long delta = 0;
7592
7593 /*
7594 * If RECLAIM_UNMAP is set, then all file pages are considered
7595 * potentially reclaimable. Otherwise, we have to worry about
7596 * pages like swapcache and node_unmapped_file_pages() provides
7597 * a better estimate
7598 */
7599 if (node_reclaim_mode & RECLAIM_UNMAP)
7600 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
7601 else
7602 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
7603
7604 /* If we can't clean pages, remove dirty pages from consideration */
7605 if (!(node_reclaim_mode & RECLAIM_WRITE))
7606 delta += node_page_state(pgdat, NR_FILE_DIRTY);
7607
7608 /* Watch for any possible underflows due to delta */
7609 if (unlikely(delta > nr_pagecache_reclaimable))
7610 delta = nr_pagecache_reclaimable;
7611
7612 return nr_pagecache_reclaimable - delta;
7613 }
7614
7615 /*
7616 * Try to free up some pages from this node through reclaim.
7617 */
7618 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7619 {
7620 /* Minimum pages needed in order to stay on node */
7621 const unsigned long nr_pages = 1 << order;
7622 struct task_struct *p = current;
7623 unsigned int noreclaim_flag;
7624 struct scan_control sc = {
7625 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
7626 .gfp_mask = current_gfp_context(gfp_mask),
7627 .order = order,
7628 .priority = NODE_RECLAIM_PRIORITY,
7629 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
7630 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
7631 .may_swap = 1,
7632 .reclaim_idx = gfp_zone(gfp_mask),
7633 };
7634 unsigned long pflags;
7635
7636 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
7637 sc.gfp_mask);
7638
7639 cond_resched();
7640 psi_memstall_enter(&pflags);
7641 delayacct_freepages_start();
7642 fs_reclaim_acquire(sc.gfp_mask);
7643 /*
7644 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
7645 */
7646 noreclaim_flag = memalloc_noreclaim_save();
7647 set_task_reclaim_state(p, &sc.reclaim_state);
7648
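/*
 * Only reclaim if the amount of reclaimable page cache or reclaimable
 * slab exceeds the per-node minimums configured via sysctl.
 */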
7649 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
7650 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
7651 /*
7652 * Free memory by calling shrink node with increasing
7653 * priorities until we have enough memory freed.
7654 */
7655 do {
7656 shrink_node(pgdat, &sc);
7657 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
7658 }
7659
7660 set_task_reclaim_state(p, NULL);
7661 memalloc_noreclaim_restore(noreclaim_flag);
7662 fs_reclaim_release(sc.gfp_mask);
7663 psi_memstall_leave(&pflags);
7664 delayacct_freepages_end();
7665
7666 trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
7667
7668 return sc.nr_reclaimed >= nr_pages;
7669 }
7670
7671 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7672 {
7673 int ret;
7674
7675 /*
7676 * Node reclaim reclaims unmapped file backed pages and
7677 * slab pages if we are over the defined limits.
7678 *
7679 * A small portion of unmapped file backed pages is needed for
7680 * file I/O otherwise pages read by file I/O will be immediately
7681 * thrown out if the node is overallocated. So we do not reclaim
7682 * if less than a specified percentage of the node is used by
7683 * unmapped file backed pages.
7684 */
7685 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
7686 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
7687 pgdat->min_slab_pages)
7688 return NODE_RECLAIM_FULL;
7689
7690 /*
7691 * Do not scan if the allocation should not be delayed.
7692 */
7693 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
7694 return NODE_RECLAIM_NOSCAN;
7695
7696 /*
7697 * Only run node reclaim on the local node or on nodes that do not
7698 * have associated processors. This will favor the local processor
7699 * over remote processors and spread off node memory allocations
7700 * as wide as possible.
7701 */
7702 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
7703 return NODE_RECLAIM_NOSCAN;
7704
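/* Serialize reclaim: only one task may run __node_reclaim on a node at a time */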
7705 if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
7706 return NODE_RECLAIM_NOSCAN;
7707
7708 ret = __node_reclaim(pgdat, gfp_mask, order);
7709 clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
7710
7711 if (ret)
7712 count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
7713 else
7714 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
7715
7716 return ret;
7717 }
7718 #endif
7719
7720 /**
7721 * check_move_unevictable_folios - Move evictable folios to appropriate zone
7722 * lru list
7723 * @fbatch: Batch of lru folios to check.
7724 *
7725 * Checks folios for evictability, if an evictable folio is in the unevictable
7726 * lru list, moves it to the appropriate evictable lru list. This function
7727 * should be only used for lru folios.
7728 */
7729 void check_move_unevictable_folios(struct folio_batch *fbatch)
7730 {
7731 struct lruvec *lruvec = NULL;
7732 int pgscanned = 0;
7733 int pgrescued = 0;
7734 int i;
7735
7736 for (i = 0; i < fbatch->nr; i++) {
7737 struct folio *folio = fbatch->folios[i];
7738 int nr_pages = folio_nr_pages(folio);
7739
7740 pgscanned += nr_pages;
7741
7742 /* block memcg migration while the folio moves between lrus */
7743 if (!folio_test_clear_lru(folio))
7744 continue;
7745
7746 lruvec = folio_lruvec_relock_irq(folio, lruvec);
7747 if (folio_evictable(folio) && folio_test_unevictable(folio)) {
7748 lruvec_del_folio(lruvec, folio);
7749 folio_clear_unevictable(folio);
7750 lruvec_add_folio(lruvec, folio);
7751 pgrescued += nr_pages;
7752 }
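/* Restore the lru flag that was cleared above, now that the move is done */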
7753 folio_set_lru(folio);
7754 }
7755
7756 if (lruvec) {
7757 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
7758 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7759 unlock_page_lruvec_irq(lruvec);
7760 } else if (pgscanned) {
7761 count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7762 }
7763 }
7764 EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
7765