1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
4 *
5 * Swap reorganised 29.12.95, Stephen Tweedie.
6 * kswapd added: 7.1.96 sct
7 * Removed kswapd_ctl limits, and swap out as many pages as needed
8 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
9 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
10 * Multiqueue VM started 5.8.00, Rik van Riel.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/mm.h>
16 #include <linux/sched/mm.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/swap.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/highmem.h>
24 #include <linux/vmpressure.h>
25 #include <linux/vmstat.h>
26 #include <linux/file.h>
27 #include <linux/writeback.h>
28 #include <linux/blkdev.h>
29 #include <linux/buffer_head.h> /* for buffer_heads_over_limit */
30 #include <linux/mm_inline.h>
31 #include <linux/backing-dev.h>
32 #include <linux/rmap.h>
33 #include <linux/topology.h>
34 #include <linux/cpu.h>
35 #include <linux/cpuset.h>
36 #include <linux/compaction.h>
37 #include <linux/notifier.h>
38 #include <linux/delay.h>
39 #include <linux/kthread.h>
40 #include <linux/freezer.h>
41 #include <linux/memcontrol.h>
42 #include <linux/migrate.h>
43 #include <linux/delayacct.h>
44 #include <linux/sysctl.h>
45 #include <linux/memory-tiers.h>
46 #include <linux/oom.h>
47 #include <linux/pagevec.h>
48 #include <linux/prefetch.h>
49 #include <linux/printk.h>
50 #include <linux/dax.h>
51 #include <linux/psi.h>
52 #include <linux/pagewalk.h>
53 #include <linux/shmem_fs.h>
54 #include <linux/ctype.h>
55 #include <linux/debugfs.h>
56 #include <linux/khugepaged.h>
57 #include <linux/rculist_nulls.h>
58 #include <linux/random.h>
59 #include <linux/mmu_notifier.h>
60 #include <linux/parser.h>
61
62 #include <asm/tlbflush.h>
63 #include <asm/div64.h>
64
65 #include <linux/swapops.h>
66 #include <linux/sched/sysctl.h>
67
68 #include "internal.h"
69 #include "swap.h"
70
71 #define CREATE_TRACE_POINTS
72 #include <trace/events/vmscan.h>
73
74 struct scan_control {
75 /* How many pages shrink_list() should reclaim */
76 unsigned long nr_to_reclaim;
77
78 /*
79 * Nodemask of nodes allowed by the caller. If NULL, all nodes
80 * are scanned.
81 */
82 nodemask_t *nodemask;
83
84 /*
85 * The memory cgroup that hit its limit and as a result is the
86 * primary target of this reclaim invocation.
87 */
88 struct mem_cgroup *target_mem_cgroup;
89
90 /*
91 * Scan pressure balancing between anon and file LRUs
92 */
93 unsigned long anon_cost;
94 unsigned long file_cost;
95
96 /* Swappiness value for proactive reclaim. Always use sc_swappiness()! */
97 int *proactive_swappiness;
98
99 /* Can active folios be deactivated as part of reclaim? */
100 #define DEACTIVATE_ANON 1
101 #define DEACTIVATE_FILE 2
102 unsigned int may_deactivate:2;
103 unsigned int force_deactivate:1;
104 unsigned int skipped_deactivate:1;
105
106 /* zone_reclaim_mode, boost reclaim */
107 unsigned int may_writepage:1;
108
109 /* zone_reclaim_mode */
110 unsigned int may_unmap:1;
111
112 /* zone_reclaim_mode, boost reclaim, cgroup restrictions */
113 unsigned int may_swap:1;
114
115 /* Do not allow cache_trim_mode to be turned on as part of reclaim */
116 unsigned int no_cache_trim_mode:1;
117
118 /* Has cache_trim_mode failed at least once? */
119 unsigned int cache_trim_mode_failed:1;
120
121 /* Proactive reclaim invoked by userspace */
122 unsigned int proactive:1;
123
124 /*
125 * Cgroup memory below memory.low is protected as long as we
126 * don't threaten to OOM. If any cgroup is reclaimed at
127 * reduced force or passed over entirely due to its memory.low
128 * setting (memcg_low_skipped), and nothing is reclaimed as a
129 * result, then go back for one more cycle that reclaims the protected
130 * memory (memcg_low_reclaim) to avert OOM.
131 */
132 unsigned int memcg_low_reclaim:1;
133 unsigned int memcg_low_skipped:1;
134
135 /* Shared cgroup tree walk failed, rescan the whole tree */
136 unsigned int memcg_full_walk:1;
137
138 unsigned int hibernation_mode:1;
139
140 /* One of the zones is ready for compaction */
141 unsigned int compaction_ready:1;
142
143 /* There is easily reclaimable cold cache in the current node */
144 unsigned int cache_trim_mode:1;
145
146 /* The file folios on the current node are dangerously low */
147 unsigned int file_is_tiny:1;
148
149 /* Always discard instead of demoting to lower tier memory */
150 unsigned int no_demotion:1;
151
152 /* Allocation order */
153 s8 order;
154
155 /* Scan (total_size >> priority) pages at once */
156 s8 priority;
157
158 /* The highest zone to isolate folios for reclaim from */
159 s8 reclaim_idx;
160
161 /* This context's GFP mask */
162 gfp_t gfp_mask;
163
164 /* Incremented by the number of inactive pages that were scanned */
165 unsigned long nr_scanned;
166
167 /* Number of pages freed so far during a call to shrink_zones() */
168 unsigned long nr_reclaimed;
169
170 struct {
171 unsigned int dirty;
172 unsigned int unqueued_dirty;
173 unsigned int congested;
174 unsigned int writeback;
175 unsigned int immediate;
176 unsigned int file_taken;
177 unsigned int taken;
178 } nr;
179
180 /* for recording the reclaimed slab by now */
181 struct reclaim_state reclaim_state;
182 };
183
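/*
 * Illustrative sketch (not a definitive recipe): a minimal caller-side
 * scan_control, similar to the one set up by reclaim_clean_pages_from_list()
 * below, might look like:
 *
 *	struct scan_control sc = {
 *		.gfp_mask	= GFP_KERNEL,
 *		.may_unmap	= 1,
 *	};
 *
 * Real entry points additionally fill in nr_to_reclaim, order, reclaim_idx,
 * priority and the may_* bits according to the reclaim context.
 */
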
184 #ifdef ARCH_HAS_PREFETCHW
185 #define prefetchw_prev_lru_folio(_folio, _base, _field) \
186 do { \
187 if ((_folio)->lru.prev != _base) { \
188 struct folio *prev; \
189 \
190 prev = lru_to_folio(&(_folio->lru)); \
191 prefetchw(&prev->_field); \
192 } \
193 } while (0)
194 #else
195 #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
196 #endif
197
198 /*
199 * From 0 .. MAX_SWAPPINESS. Higher means more swappy.
200 */
201 int vm_swappiness = 60;
202
203 #ifdef CONFIG_MEMCG
204
205 /* Returns true for reclaim through cgroup limits or cgroup interfaces. */
206 static bool cgroup_reclaim(struct scan_control *sc)
207 {
208 return sc->target_mem_cgroup;
209 }
210
211 /*
212 * Returns true for reclaim on the root cgroup. This is true for direct
213 * allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
214 */
215 static bool root_reclaim(struct scan_control *sc)
216 {
217 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
218 }
219
220 /**
221 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
222 * @sc: scan_control in question
223 *
224 * The normal page dirty throttling mechanism in balance_dirty_pages() is
225 * completely broken with the legacy memcg and direct stalling in
226 * shrink_folio_list() is used for throttling instead, which lacks all the
227 * niceties such as fairness, adaptive pausing, bandwidth proportional
228 * allocation and configurability.
229 *
230 * This function tests whether the vmscan currently in progress can assume
231 * that the normal dirty throttling mechanism is operational.
232 */
233 static bool writeback_throttling_sane(struct scan_control *sc)
234 {
235 if (!cgroup_reclaim(sc))
236 return true;
237 #ifdef CONFIG_CGROUP_WRITEBACK
238 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
239 return true;
240 #endif
241 return false;
242 }
243
244 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
245 {
246 if (sc->proactive && sc->proactive_swappiness)
247 return *sc->proactive_swappiness;
248 return mem_cgroup_swappiness(memcg);
249 }
250 #else
251 static bool cgroup_reclaim(struct scan_control *sc)
252 {
253 return false;
254 }
255
256 static bool root_reclaim(struct scan_control *sc)
257 {
258 return true;
259 }
260
261 static bool writeback_throttling_sane(struct scan_control *sc)
262 {
263 return true;
264 }
265
266 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
267 {
268 return READ_ONCE(vm_swappiness);
269 }
270 #endif
271
272 /* for_each_managed_zone_pgdat - helper macro to iterate over all managed zones in a pgdat up to
273 * and including the specified highidx
274 * @zone: The current zone in the iterator
275 * @pgdat: The pgdat which node_zones are being iterated
276 * @idx: The index variable
277 * @highidx: The index of the highest zone to return
278 *
279 * This macro iterates through all managed zones up to and including the specified highidx.
280 * The zone iterator enters an invalid state after the macro call and must be reinitialized
281 * before it can be used again.
282 */
283 #define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \
284 for ((idx) = 0, (zone) = (pgdat)->node_zones; \
285 (idx) <= (highidx); \
286 (idx)++, (zone)++) \
287 if (!managed_zone(zone)) \
288 continue; \
289 else
290
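/*
 * Usage sketch for the macro above, mirroring the loop in
 * skip_throttle_noprogress() later in this file:
 *
 *	for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) {
 *		reclaimable += zone_reclaimable_pages(zone);
 *		write_pending += zone_page_state_snapshot(zone,
 *						NR_ZONE_WRITE_PENDING);
 *	}
 */
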
291 static void set_task_reclaim_state(struct task_struct *task,
292 struct reclaim_state *rs)
293 {
294 /* Check for an overwrite */
295 WARN_ON_ONCE(rs && task->reclaim_state);
296
297 /* Check for the nulling of an already-nulled member */
298 WARN_ON_ONCE(!rs && !task->reclaim_state);
299
300 task->reclaim_state = rs;
301 }
302
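/*
 * Typical pairing (illustrative sketch): reclaim entry points install the
 * state before scanning and clear it again afterwards:
 *
 *	set_task_reclaim_state(current, &sc.reclaim_state);
 *	... perform the reclaim work ...
 *	set_task_reclaim_state(current, NULL);
 */
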
303 /*
304 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
305 * scan_control->nr_reclaimed.
306 */
307 static void flush_reclaim_state(struct scan_control *sc)
308 {
309 /*
310 * Currently, reclaim_state->reclaimed includes three types of pages
311 * freed outside of vmscan:
312 * (1) Slab pages.
313 * (2) Clean file pages from pruned inodes (on highmem systems).
314 * (3) XFS freed buffer pages.
315 *
316 * For all of these cases, we cannot universally link the pages to a
317 * single memcg. For example, a memcg-aware shrinker can free one object
318 * charged to the target memcg, causing an entire page to be freed.
319 * If we count the entire page as reclaimed from the memcg, we end up
320 * overestimating the reclaimed amount (potentially under-reclaiming).
321 *
322 * Only count such pages for global reclaim to prevent under-reclaiming
323 * from the target memcg; preventing unnecessary retries during memcg
324 * charging and false positives from proactive reclaim.
325 *
326 * For uncommon cases where the freed pages were actually mostly
327 * charged to the target memcg, we end up underestimating the reclaimed
328 * amount. This should be fine. The freed pages will be uncharged
329 * anyway, even if they are not counted here properly, and we will be
330 * able to make forward progress in charging (which is usually in a
331 * retry loop).
332 *
333 * We can go one step further, and report the uncharged objcg pages in
334 * memcg reclaim, to make reporting more accurate and reduce
335 * underestimation, but it's probably not worth the complexity for now.
336 */
337 if (current->reclaim_state && root_reclaim(sc)) {
338 sc->nr_reclaimed += current->reclaim_state->reclaimed;
339 current->reclaim_state->reclaimed = 0;
340 }
341 }
342
343 static bool can_demote(int nid, struct scan_control *sc,
344 struct mem_cgroup *memcg)
345 {
346 struct pglist_data *pgdat = NODE_DATA(nid);
347 nodemask_t allowed_mask;
348
349 if (!pgdat || !numa_demotion_enabled)
350 return false;
351 if (sc && sc->no_demotion)
352 return false;
353
354 node_get_allowed_targets(pgdat, &allowed_mask);
355 if (nodes_empty(allowed_mask))
356 return false;
357
358 /* Filter out nodes that are not in cgroup's mems_allowed. */
359 mem_cgroup_node_filter_allowed(memcg, &allowed_mask);
360 return !nodes_empty(allowed_mask);
361 }
362
363 static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
364 int nid,
365 struct scan_control *sc)
366 {
367 if (memcg == NULL) {
368 /*
369 * For non-memcg reclaim, is there
370 * space in any swap device?
371 */
372 if (get_nr_swap_pages() > 0)
373 return true;
374 } else {
375 /* Is the memcg below its swap limit? */
376 if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
377 return true;
378 }
379
380 /*
381 * The page cannot be swapped.
382 *
383 * Can it be reclaimed from this node via demotion?
384 */
385 return can_demote(nid, sc, memcg);
386 }
387
388 /*
389 * This misses isolated folios which are not accounted for to save counters.
390 * As the data only determines if reclaim or compaction continues, it is
391 * not expected that isolated folios will be a dominating factor.
392 */
393 unsigned long zone_reclaimable_pages(struct zone *zone)
394 {
395 unsigned long nr;
396
397 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
398 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
399 if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
400 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
401 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
402
403 return nr;
404 }
405
406 /**
407 * lruvec_lru_size - Returns the number of pages on the given LRU list.
408 * @lruvec: lru vector
409 * @lru: lru to use
410 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
411 */
412 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
413 int zone_idx)
414 {
415 unsigned long size = 0;
416 int zid;
417 struct zone *zone;
418
419 for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) {
420 if (!mem_cgroup_disabled())
421 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
422 else
423 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
424 }
425 return size;
426 }
427
428 static unsigned long drop_slab_node(int nid)
429 {
430 unsigned long freed = 0;
431 struct mem_cgroup *memcg = NULL;
432
433 memcg = mem_cgroup_iter(NULL, NULL, NULL);
434 do {
435 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
436 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
437
438 return freed;
439 }
440
441 void drop_slab(void)
442 {
443 int nid;
444 int shift = 0;
445 unsigned long freed;
446
447 do {
448 freed = 0;
449 for_each_online_node(nid) {
450 if (fatal_signal_pending(current))
451 return;
452
453 freed += drop_slab_node(nid);
454 }
455 } while ((freed >> shift++) > 1);
456 }
457
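/*
 * Note on the termination condition above: (freed >> shift++) > 1 keeps
 * looping while each pass frees a geometrically growing minimum, e.g. the
 * first pass continues if it freed at least 2 objects, the second if at
 * least 4, the third if at least 8, and so on, so the loop stops once the
 * amount freed per pass tails off.
 */
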
458 #define CHECK_RECLAIMER_OFFSET(type) \
459 do { \
460 BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
461 PGDEMOTE_##type - PGDEMOTE_KSWAPD); \
462 BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
463 PGSCAN_##type - PGSCAN_KSWAPD); \
464 } while (0)
465
466 static int reclaimer_offset(struct scan_control *sc)
467 {
468 CHECK_RECLAIMER_OFFSET(DIRECT);
469 CHECK_RECLAIMER_OFFSET(KHUGEPAGED);
470 CHECK_RECLAIMER_OFFSET(PROACTIVE);
471
472 if (current_is_kswapd())
473 return 0;
474 if (current_is_khugepaged())
475 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
476 if (sc->proactive)
477 return PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD;
478 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
479 }
480
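/*
 * Illustrative use of the offset (the BUILD_BUG_ONs above guarantee the
 * PGSTEAL/PGSCAN/PGDEMOTE counters share a common layout), following the
 * pattern used elsewhere in this file:
 *
 *	__count_vm_events(PGSCAN_KSWAPD + reclaimer_offset(sc), nr_scanned);
 *	__count_vm_events(PGSTEAL_KSWAPD + reclaimer_offset(sc), nr_reclaimed);
 */
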
481 /*
482 * We detected a synchronous write error writing a folio out. Probably
483 * -ENOSPC. We need to propagate that into the address_space for a subsequent
484 * fsync(), msync() or close().
485 *
486 * The tricky part is that after writepage we cannot touch the mapping: nothing
487 * prevents it from being freed up. But we have a ref on the folio and once
488 * that folio is locked, the mapping is pinned.
489 *
490 * We're allowed to run sleeping folio_lock() here because we know the caller has
491 * __GFP_FS.
492 */
493 static void handle_write_error(struct address_space *mapping,
494 struct folio *folio, int error)
495 {
496 folio_lock(folio);
497 if (folio_mapping(folio) == mapping)
498 mapping_set_error(mapping, error);
499 folio_unlock(folio);
500 }
501
502 static bool skip_throttle_noprogress(pg_data_t *pgdat)
503 {
504 int reclaimable = 0, write_pending = 0;
505 int i;
506 struct zone *zone;
507 /*
508 * If kswapd is disabled, reschedule if necessary but do not
509 * throttle as the system is likely near OOM.
510 */
511 if (kswapd_test_hopeless(pgdat))
512 return true;
513
514 /*
515 * If there are a lot of dirty/writeback folios then do not
516 * throttle as throttling will occur when the folios cycle
517 * towards the end of the LRU if still under writeback.
518 */
519 for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) {
520 reclaimable += zone_reclaimable_pages(zone);
521 write_pending += zone_page_state_snapshot(zone,
522 NR_ZONE_WRITE_PENDING);
523 }
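/*
 * Only throttle when more than half of the reclaimable pages are dirty
 * or under writeback, i.e. write_pending > reclaimable / 2.
 */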
524 if (2 * write_pending <= reclaimable)
525 return true;
526
527 return false;
528 }
529
530 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
531 {
532 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
533 long timeout, ret;
534 DEFINE_WAIT(wait);
535
536 /*
537 * Do not throttle user workers, kthreads other than kswapd or
538 * workqueues. They may be required for reclaim to make
539 * forward progress (e.g. journalling workqueues or kthreads).
540 */
541 if (!current_is_kswapd() &&
542 current->flags & (PF_USER_WORKER|PF_KTHREAD)) {
543 cond_resched();
544 return;
545 }
546
547 /*
548 * These figures are pulled out of thin air.
549 * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many
550 * parallel reclaimers which is a short-lived event so the timeout is
551 * short. Failing to make progress or waiting on writeback are
552 * potentially long-lived events so use a longer timeout. This is shaky
553 * logic as a failure to make progress could be due to anything from
554 * writeback to a slow device to excessive referenced folios at the tail
555 * of the inactive LRU.
556 */
557 switch(reason) {
558 case VMSCAN_THROTTLE_WRITEBACK:
559 timeout = HZ/10;
560
561 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
562 WRITE_ONCE(pgdat->nr_reclaim_start,
563 node_page_state(pgdat, NR_THROTTLED_WRITTEN));
564 }
565
566 break;
567 case VMSCAN_THROTTLE_CONGESTED:
568 fallthrough;
569 case VMSCAN_THROTTLE_NOPROGRESS:
570 if (skip_throttle_noprogress(pgdat)) {
571 cond_resched();
572 return;
573 }
574
575 timeout = 1;
576
577 break;
578 case VMSCAN_THROTTLE_ISOLATED:
579 timeout = HZ/50;
580 break;
581 default:
582 WARN_ON_ONCE(1);
583 timeout = HZ;
584 break;
585 }
586
587 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
588 ret = schedule_timeout(timeout);
589 finish_wait(wqh, &wait);
590
591 if (reason == VMSCAN_THROTTLE_WRITEBACK)
592 atomic_dec(&pgdat->nr_writeback_throttled);
593
594 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
595 jiffies_to_usecs(timeout - ret),
596 reason);
597 }
598
599 /*
600 * Account for folios written if tasks are throttled waiting on dirty
601 * folios to clean. If enough folios have been cleaned since throttling
602 * started then wakeup the throttled tasks.
603 */
604 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
605 int nr_throttled)
606 {
607 unsigned long nr_written;
608
609 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
610
611 /*
612 * This is an inaccurate read as the per-cpu deltas may not
613 * be synchronised. However, given that the system is
614 * writeback throttled, it is not worth taking the penalty
615 * of getting an accurate count. At worst, the throttle
616 * timeout guarantees forward progress.
617 */
618 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
619 READ_ONCE(pgdat->nr_reclaim_start);
620
621 if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
622 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
623 }
624
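/*
 * Worked example (assuming the usual SWAP_CLUSTER_MAX of 32): with four
 * tasks throttled on writeback, the waiters are only woken once more than
 * 4 * 32 = 128 throttled folios have completed writeback since throttling
 * started.
 */
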
625 /* possible outcome of pageout() */
626 typedef enum {
627 /* failed to write folio out, folio is locked */
628 PAGE_KEEP,
629 /* move folio to the active list, folio is locked */
630 PAGE_ACTIVATE,
631 /* folio has been sent to the disk successfully, folio is unlocked */
632 PAGE_SUCCESS,
633 /* folio is clean and locked */
634 PAGE_CLEAN,
635 } pageout_t;
636
637 static pageout_t writeout(struct folio *folio, struct address_space *mapping,
638 struct swap_iocb **plug, struct list_head *folio_list)
639 {
640 int res;
641
642 folio_set_reclaim(folio);
643
644 /*
645 * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
646 * or we failed to allocate contiguous swap entries, in which case
647 * the split out folios get added back to folio_list.
648 */
649 if (shmem_mapping(mapping))
650 res = shmem_writeout(folio, plug, folio_list);
651 else
652 res = swap_writeout(folio, plug);
653
654 if (res < 0)
655 handle_write_error(mapping, folio, res);
656 if (res == AOP_WRITEPAGE_ACTIVATE) {
657 folio_clear_reclaim(folio);
658 return PAGE_ACTIVATE;
659 }
660
661 /* synchronous write? */
662 if (!folio_test_writeback(folio))
663 folio_clear_reclaim(folio);
664
665 trace_mm_vmscan_write_folio(folio);
666 node_stat_add_folio(folio, NR_VMSCAN_WRITE);
667 return PAGE_SUCCESS;
668 }
669
670 /*
671 * pageout is called by shrink_folio_list() for each dirty folio.
672 */
673 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
674 struct swap_iocb **plug, struct list_head *folio_list)
675 {
676 /*
677 * We no longer attempt to writeback filesystem folios here, other
678 * than tmpfs/shmem. That's taken care of in page-writeback.
679 * If we find a dirty filesystem folio at the end of the LRU list,
680 * typically that means the filesystem is saturating the storage
681 * with contiguous writes and telling it to write a folio here
682 * would only make the situation worse by injecting an element
683 * of random access.
684 *
685 * If the folio is swapcache, write it back even if that would
686 * block, for some throttling. This happens by accident, because
687 * swap_backing_dev_info is bust: it doesn't reflect the
688 * congestion state of the swapdevs. Easy to fix, if needed.
689 *
690 * A freeable shmem or swapcache folio is referenced only by the
691 * caller that isolated the folio and the page cache.
692 */
693 if (folio_ref_count(folio) != 1 + folio_nr_pages(folio) || !mapping)
694 return PAGE_KEEP;
695 if (!shmem_mapping(mapping) && !folio_test_anon(folio))
696 return PAGE_ACTIVATE;
697 if (!folio_clear_dirty_for_io(folio))
698 return PAGE_CLEAN;
699 return writeout(folio, mapping, plug, folio_list);
700 }
701
702 /*
703 * Same as remove_mapping, but if the folio is removed from the mapping, it
704 * gets returned with a refcount of 0.
705 */
706 static int __remove_mapping(struct address_space *mapping, struct folio *folio,
707 bool reclaimed, struct mem_cgroup *target_memcg)
708 {
709 int refcount;
710 void *shadow = NULL;
711 struct swap_cluster_info *ci;
712
713 BUG_ON(!folio_test_locked(folio));
714 BUG_ON(mapping != folio_mapping(folio));
715
716 if (folio_test_swapcache(folio)) {
717 ci = swap_cluster_get_and_lock_irq(folio);
718 } else {
719 spin_lock(&mapping->host->i_lock);
720 xa_lock_irq(&mapping->i_pages);
721 }
722
723 /*
724 * The non-racy check for a busy folio.
725 *
726 * Must be careful with the order of the tests. When someone has
727 * a ref to the folio, it may be possible that they dirty it then
728 * drop the reference. So if the dirty flag is tested before the
729 * refcount here, then the following race may occur:
730 *
731 * get_user_pages(&page);
732 * [user mapping goes away]
733 * write_to(page);
734 * !folio_test_dirty(folio) [good]
735 * folio_set_dirty(folio);
736 * folio_put(folio);
737 * !refcount(folio) [good, discard it]
738 *
739 * [oops, our write_to data is lost]
740 *
741 * Reversing the order of the tests ensures such a situation cannot
742 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
743 * load is not satisfied before that of folio->_refcount.
744 *
745 * Note that if the dirty flag is always set via folio_mark_dirty,
746 * and thus under the i_pages lock, then this ordering is not required.
747 */
748 refcount = 1 + folio_nr_pages(folio);
749 if (!folio_ref_freeze(folio, refcount))
750 goto cannot_free;
751 /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */
752 if (unlikely(folio_test_dirty(folio))) {
753 folio_ref_unfreeze(folio, refcount);
754 goto cannot_free;
755 }
756
757 if (folio_test_swapcache(folio)) {
758 swp_entry_t swap = folio->swap;
759
760 if (reclaimed && !mapping_exiting(mapping))
761 shadow = workingset_eviction(folio, target_memcg);
762 memcg1_swapout(folio, swap);
763 __swap_cache_del_folio(ci, folio, swap, shadow);
764 swap_cluster_unlock_irq(ci);
765 } else {
766 void (*free_folio)(struct folio *);
767
768 free_folio = mapping->a_ops->free_folio;
769 /*
770 * Remember a shadow entry for reclaimed file cache in
771 * order to detect refaults, thus thrashing, later on.
772 *
773 * But don't store shadows in an address space that is
774 * already exiting. This is not just an optimization,
775 * inode reclaim needs to empty out the radix tree or
776 * the nodes are lost. Don't plant shadows behind its
777 * back.
778 *
779 * We also don't store shadows for DAX mappings because the
780 * only page cache folios found in these are zero pages
781 * covering holes, and because we don't want to mix DAX
782 * exceptional entries and shadow exceptional entries in the
783 * same address_space.
784 */
785 if (reclaimed && folio_is_file_lru(folio) &&
786 !mapping_exiting(mapping) && !dax_mapping(mapping))
787 shadow = workingset_eviction(folio, target_memcg);
788 __filemap_remove_folio(folio, shadow);
789 xa_unlock_irq(&mapping->i_pages);
790 if (mapping_shrinkable(mapping))
791 inode_lru_list_add(mapping->host);
792 spin_unlock(&mapping->host->i_lock);
793
794 if (free_folio)
795 free_folio(folio);
796 }
797
798 return 1;
799
800 cannot_free:
801 if (folio_test_swapcache(folio)) {
802 swap_cluster_unlock_irq(ci);
803 } else {
804 xa_unlock_irq(&mapping->i_pages);
805 spin_unlock(&mapping->host->i_lock);
806 }
807 return 0;
808 }
809
810 /**
811 * remove_mapping() - Attempt to remove a folio from its mapping.
812 * @mapping: The address space.
813 * @folio: The folio to remove.
814 *
815 * If the folio is dirty, under writeback or if someone else has a ref
816 * on it, removal will fail.
817 * Return: The number of pages removed from the mapping. 0 if the folio
818 * could not be removed.
819 * Context: The caller should have a single refcount on the folio and
820 * hold its lock.
821 */
822 long remove_mapping(struct address_space *mapping, struct folio *folio)
823 {
824 if (__remove_mapping(mapping, folio, false, NULL)) {
825 /*
826 * Unfreezing the refcount with 1 effectively
827 * drops the pagecache ref for us without requiring another
828 * atomic operation.
829 */
830 folio_ref_unfreeze(folio, 1);
831 return folio_nr_pages(folio);
832 }
833 return 0;
834 }
835
836 /**
837 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
838 * @folio: Folio to be returned to an LRU list.
839 *
840 * Add previously isolated @folio to appropriate LRU list.
841 * The folio may still be unevictable for other reasons.
842 *
843 * Context: lru_lock must not be held, interrupts must be enabled.
844 */
845 void folio_putback_lru(struct folio *folio)
846 {
847 folio_add_lru(folio);
848 folio_put(folio); /* drop ref from isolate */
849 }
850
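/*
 * Illustrative pairing (a sketch, not taken verbatim from this file): a
 * caller that isolated a folio with folio_isolate_lru() and later decides
 * not to reclaim it hands it back with:
 *
 *	if (folio_isolate_lru(folio)) {
 *		... decide the folio should stay ...
 *		folio_putback_lru(folio);
 *	}
 */
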
851 enum folio_references {
852 FOLIOREF_RECLAIM,
853 FOLIOREF_RECLAIM_CLEAN,
854 FOLIOREF_KEEP,
855 FOLIOREF_ACTIVATE,
856 };
857
858 #ifdef CONFIG_LRU_GEN
859 /*
860 * Only used on a mapped folio in the eviction (rmap walk) path, where promotion
861 * needs to be done by taking the folio off the LRU list and then adding it back
862 * with PG_active set. In contrast, the aging (page table walk) path uses
863 * folio_update_gen().
864 */
865 static bool lru_gen_set_refs(struct folio *folio)
866 {
867 /* see the comment on LRU_REFS_FLAGS */
868 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
869 set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
870 return false;
871 }
872
873 set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_workingset));
874 return true;
875 }
876 #else
877 static bool lru_gen_set_refs(struct folio *folio)
878 {
879 return false;
880 }
881 #endif /* CONFIG_LRU_GEN */
882
883 static enum folio_references folio_check_references(struct folio *folio,
884 struct scan_control *sc)
885 {
886 int referenced_ptes, referenced_folio;
887 vm_flags_t vm_flags;
888
889 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
890 &vm_flags);
891
892 /*
893 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
894 * Let the folio, now marked Mlocked, be moved to the unevictable list.
895 */
896 if (vm_flags & VM_LOCKED)
897 return FOLIOREF_ACTIVATE;
898
899 /*
900 * There are two cases to consider.
901 * 1) Rmap lock contention: rotate.
902 * 2) Skip the non-shared swapbacked folio mapped solely by
903 * the exiting or OOM-reaped process.
904 */
905 if (referenced_ptes == -1)
906 return FOLIOREF_KEEP;
907
908 if (lru_gen_enabled()) {
909 if (!referenced_ptes)
910 return FOLIOREF_RECLAIM;
911
912 return lru_gen_set_refs(folio) ? FOLIOREF_ACTIVATE : FOLIOREF_KEEP;
913 }
914
915 referenced_folio = folio_test_clear_referenced(folio);
916
917 if (referenced_ptes) {
918 /*
919 * All mapped folios start out with page table
920 * references from the instantiating fault, so we need
921 * to look twice if a mapped file/anon folio is used more
922 * than once.
923 *
924 * Mark it and spare it for another trip around the
925 * inactive list. Another page table reference will
926 * lead to its activation.
927 *
928 * Note: the mark is set for activated folios as well
929 * so that recently deactivated but used folios are
930 * quickly recovered.
931 */
932 folio_set_referenced(folio);
933
934 if (referenced_folio || referenced_ptes > 1)
935 return FOLIOREF_ACTIVATE;
936
937 /*
938 * Activate file-backed executable folios after first usage.
939 */
940 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
941 return FOLIOREF_ACTIVATE;
942
943 return FOLIOREF_KEEP;
944 }
945
946 /* Reclaim if clean, defer dirty folios to writeback */
947 if (referenced_folio && folio_is_file_lru(folio))
948 return FOLIOREF_RECLAIM_CLEAN;
949
950 return FOLIOREF_RECLAIM;
951 }
952
953 /* Check if a folio is dirty or under writeback */
954 static void folio_check_dirty_writeback(struct folio *folio,
955 bool *dirty, bool *writeback)
956 {
957 struct address_space *mapping;
958
959 /*
960 * Anonymous folios are not handled by flushers and must be written
961 * from reclaim context. Do not stall reclaim based on them.
962 * MADV_FREE anonymous folios are put on the inactive file list too,
963 * so they could be mistakenly treated as file LRU; a further anon
964 * test is needed.
965 */
966 if (!folio_is_file_lru(folio) ||
967 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
968 *dirty = false;
969 *writeback = false;
970 return;
971 }
972
973 /* By default assume that the folio flags are accurate */
974 *dirty = folio_test_dirty(folio);
975 *writeback = folio_test_writeback(folio);
976
977 /* Verify dirty/writeback state if the filesystem supports it */
978 if (!folio_test_private(folio))
979 return;
980
981 mapping = folio_mapping(folio);
982 if (mapping && mapping->a_ops->is_dirty_writeback)
983 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
984 }
985
986 static struct folio *alloc_demote_folio(struct folio *src,
987 unsigned long private)
988 {
989 struct folio *dst;
990 nodemask_t *allowed_mask;
991 struct migration_target_control *mtc;
992
993 mtc = (struct migration_target_control *)private;
994
995 allowed_mask = mtc->nmask;
996 /*
997 * Make sure we allocate from the target node first, also trying to
998 * demote or reclaim pages from the target node via kswapd if we are
999 * low on free memory on the target node. If we don't do this and we
1000 * have free memory on the slower (lower) memtier, we would start
1001 * allocating pages from the slower (lower) memory tiers without even
1002 * forcing a demotion of cold pages from the target memtier. This can
1003 * result in the kernel placing hot pages in slower (lower) memory tiers.
1004 */
1005 mtc->nmask = NULL;
1006 mtc->gfp_mask |= __GFP_THISNODE;
1007 dst = alloc_migration_target(src, (unsigned long)mtc);
1008 if (dst)
1009 return dst;
1010
1011 mtc->gfp_mask &= ~__GFP_THISNODE;
1012 mtc->nmask = allowed_mask;
1013
1014 return alloc_migration_target(src, (unsigned long)mtc);
1015 }
1016
1017 /*
1018 * Take folios on @demote_folios and attempt to demote them to another node.
1019 * Folios which are not demoted are left on @demote_folios.
1020 */
1021 static unsigned int demote_folio_list(struct list_head *demote_folios,
1022 struct pglist_data *pgdat,
1023 struct mem_cgroup *memcg)
1024 {
1025 int target_nid;
1026 unsigned int nr_succeeded;
1027 nodemask_t allowed_mask;
1028
1029 struct migration_target_control mtc = {
1030 /*
1031 * Allocate from 'node', or fail quickly and quietly.
1032 * When this happens, 'page' will likely just be discarded
1033 * instead of migrated.
1034 */
1035 .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
1036 __GFP_NOMEMALLOC | GFP_NOWAIT,
1037 .nmask = &allowed_mask,
1038 .reason = MR_DEMOTION,
1039 };
1040
1041 if (list_empty(demote_folios))
1042 return 0;
1043
1044 node_get_allowed_targets(pgdat, &allowed_mask);
1045 mem_cgroup_node_filter_allowed(memcg, &allowed_mask);
1046 if (nodes_empty(allowed_mask))
1047 return 0;
1048
1049 target_nid = next_demotion_node(pgdat->node_id, &allowed_mask);
1050 if (target_nid == NUMA_NO_NODE)
1051 /* No lower-tier nodes or nodes were hot-unplugged. */
1052 return 0;
1053
1054 mtc.nid = target_nid;
1055
1056 /* Demotion ignores all cpuset and mempolicy settings */
1057 migrate_pages(demote_folios, alloc_demote_folio, NULL,
1058 (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
1059 &nr_succeeded);
1060
1061 return nr_succeeded;
1062 }
1063
1064 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
1065 {
1066 if (gfp_mask & __GFP_FS)
1067 return true;
1068 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
1069 return false;
1070 /*
1071 * We can "enter_fs" for swap-cache with only __GFP_IO
1072 * providing this isn't SWP_FS_OPS.
1073 * ->flags can be updated non-atomically (scan_swap_map_slots),
1074 * but that will never affect SWP_FS_OPS, so the data_race
1075 * is safe.
1076 */
1077 return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
1078 }
1079
1080 /*
1081 * shrink_folio_list() returns the number of reclaimed pages
1082 */
1083 static unsigned int shrink_folio_list(struct list_head *folio_list,
1084 struct pglist_data *pgdat, struct scan_control *sc,
1085 struct reclaim_stat *stat, bool ignore_references,
1086 struct mem_cgroup *memcg)
1087 {
1088 struct folio_batch free_folios;
1089 LIST_HEAD(ret_folios);
1090 LIST_HEAD(demote_folios);
1091 unsigned int nr_reclaimed = 0, nr_demoted = 0;
1092 unsigned int pgactivate = 0;
1093 bool do_demote_pass;
1094 struct swap_iocb *plug = NULL;
1095
1096 folio_batch_init(&free_folios);
1097 memset(stat, 0, sizeof(*stat));
1098 cond_resched();
1099 do_demote_pass = can_demote(pgdat->node_id, sc, memcg);
1100
1101 retry:
1102 while (!list_empty(folio_list)) {
1103 struct address_space *mapping;
1104 struct folio *folio;
1105 enum folio_references references = FOLIOREF_RECLAIM;
1106 bool dirty, writeback;
1107 unsigned int nr_pages;
1108
1109 cond_resched();
1110
1111 folio = lru_to_folio(folio_list);
1112 list_del(&folio->lru);
1113
1114 if (!folio_trylock(folio))
1115 goto keep;
1116
1117 if (folio_contain_hwpoisoned_page(folio)) {
1118 /*
1119 * unmap_poisoned_folio() can't handle large
1120 * folio, just skip it. memory_failure() will
1121 * handle it if the UCE is triggered again.
1122 */
1123 if (folio_test_large(folio))
1124 goto keep_locked;
1125
1126 unmap_poisoned_folio(folio, folio_pfn(folio), false);
1127 folio_unlock(folio);
1128 folio_put(folio);
1129 continue;
1130 }
1131
1132 VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
1133
1134 nr_pages = folio_nr_pages(folio);
1135
1136 /* Account the number of base pages */
1137 sc->nr_scanned += nr_pages;
1138
1139 if (unlikely(!folio_evictable(folio)))
1140 goto activate_locked;
1141
1142 if (!sc->may_unmap && folio_mapped(folio))
1143 goto keep_locked;
1144
1145 /*
1146 * The number of dirty pages determines if a node is marked
1147 * reclaim_congested. kswapd will stall and start writing
1148 * folios if the tail of the LRU is all dirty unqueued folios.
1149 */
1150 folio_check_dirty_writeback(folio, &dirty, &writeback);
1151 if (dirty || writeback)
1152 stat->nr_dirty += nr_pages;
1153
1154 if (dirty && !writeback)
1155 stat->nr_unqueued_dirty += nr_pages;
1156
1157 /*
1158 * Treat this folio as congested if folios are cycling
1159 * through the LRU so quickly that the folios marked
1160 * for immediate reclaim are making it to the end of
1161 * the LRU a second time.
1162 */
1163 if (writeback && folio_test_reclaim(folio))
1164 stat->nr_congested += nr_pages;
1165
1166 /*
1167 * If a folio at the tail of the LRU is under writeback, there
1168 * are three cases to consider.
1169 *
1170 * 1) If reclaim is encountering an excessive number
1171 * of folios under writeback and this folio has both
1172 * the writeback and reclaim flags set, then it
1173 * indicates that folios are being queued for I/O but
1174 * are being recycled through the LRU before the I/O
1175 * can complete. Waiting on the folio itself risks an
1176 * indefinite stall if it is impossible to writeback
1177 * the folio due to I/O error or disconnected storage
1178 * so instead note that the LRU is being scanned too
1179 * quickly and the caller can stall after the folio
1180 * list has been processed.
1181 *
1182 * 2) Global or new memcg reclaim encounters a folio that is
1183 * not marked for immediate reclaim, or the caller does not
1184 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
1185 * not to fs), or the folio belongs to a mapping where
1186 * waiting on writeback during reclaim may lead to a deadlock.
1187 * In this case mark the folio for immediate reclaim and
1188 * continue scanning.
1189 *
1190 * Require may_enter_fs() because we would wait on fs, which
1191 * may not have submitted I/O yet. And the loop driver might
1192 * enter reclaim, and deadlock if it waits on a folio for
1193 * which it is needed to do the write (loop masks off
1194 * __GFP_IO|__GFP_FS for this reason); but more thought
1195 * would probably show more reasons.
1196 *
1197 * 3) Legacy memcg encounters a folio that already has the
1198 * reclaim flag set. memcg does not have any dirty folio
1199 * throttling so we could easily OOM just because too many
1200 * folios are in writeback and there is nothing else to
1201 * reclaim. Wait for the writeback to complete.
1202 *
1203 * In cases 1) and 2) we activate the folios to get them out of
1204 * the way while we continue scanning for clean folios on the
1205 * inactive list and refilling from the active list. The
1206 * observation here is that waiting for disk writes is more
1207 * expensive than potentially causing reloads down the line.
1208 * Since they're marked for immediate reclaim, they won't put
1209 * memory pressure on the cache working set any longer than it
1210 * takes to write them to disk.
1211 */
1212 if (folio_test_writeback(folio)) {
1213 mapping = folio_mapping(folio);
1214
1215 /* Case 1 above */
1216 if (current_is_kswapd() &&
1217 folio_test_reclaim(folio) &&
1218 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1219 stat->nr_immediate += nr_pages;
1220 goto activate_locked;
1221
1222 /* Case 2 above */
1223 } else if (writeback_throttling_sane(sc) ||
1224 !folio_test_reclaim(folio) ||
1225 !may_enter_fs(folio, sc->gfp_mask) ||
1226 (mapping &&
1227 mapping_writeback_may_deadlock_on_reclaim(mapping))) {
1228 /*
1229 * This is slightly racy -
1230 * folio_end_writeback() might have
1231 * just cleared the reclaim flag, then
1232 * setting the reclaim flag here ends up
1233 * interpreted as the readahead flag - but
1234 * that does not matter enough to care.
1235 * What we do want is for this folio to
1236 * have the reclaim flag set next time
1237 * memcg reclaim reaches the tests above,
1238 * so it will then wait for writeback to
1239 * avoid OOM; and it's also appropriate
1240 * in global reclaim.
1241 */
1242 folio_set_reclaim(folio);
1243 stat->nr_writeback += nr_pages;
1244 goto activate_locked;
1245
1246 /* Case 3 above */
1247 } else {
1248 folio_unlock(folio);
1249 folio_wait_writeback(folio);
1250 /* then go back and try same folio again */
1251 list_add_tail(&folio->lru, folio_list);
1252 continue;
1253 }
1254 }
1255
1256 if (!ignore_references)
1257 references = folio_check_references(folio, sc);
1258
1259 switch (references) {
1260 case FOLIOREF_ACTIVATE:
1261 goto activate_locked;
1262 case FOLIOREF_KEEP:
1263 stat->nr_ref_keep += nr_pages;
1264 goto keep_locked;
1265 case FOLIOREF_RECLAIM:
1266 case FOLIOREF_RECLAIM_CLEAN:
1267 ; /* try to reclaim the folio below */
1268 }
1269
1270 /*
1271 * Before reclaiming the folio, try to relocate
1272 * its contents to another node.
1273 */
1274 if (do_demote_pass &&
1275 (thp_migration_supported() || !folio_test_large(folio))) {
1276 list_add(&folio->lru, &demote_folios);
1277 folio_unlock(folio);
1278 continue;
1279 }
1280
1281 /*
1282 * Anonymous process memory has backing store?
1283 * Try to allocate it some swap space here.
1284 * Lazyfree folio could be freed directly
1285 */
1286 if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
1287 !folio_test_swapcache(folio)) {
1288 if (!(sc->gfp_mask & __GFP_IO))
1289 goto keep_locked;
1290 if (folio_maybe_dma_pinned(folio))
1291 goto keep_locked;
1292 if (folio_test_large(folio)) {
1293 /* cannot split folio, skip it */
1294 if (folio_expected_ref_count(folio) !=
1295 folio_ref_count(folio) - 1)
1296 goto activate_locked;
1297 /*
1298 * Split partially mapped folios right away.
1299 * We can free the unmapped pages without IO.
1300 */
1301 if (data_race(!list_empty(&folio->_deferred_list) &&
1302 folio_test_partially_mapped(folio)) &&
1303 split_folio_to_list(folio, folio_list))
1304 goto activate_locked;
1305 }
1306 if (folio_alloc_swap(folio)) {
1307 int __maybe_unused order = folio_order(folio);
1308
1309 if (!folio_test_large(folio))
1310 goto activate_locked_split;
1311 /* Fallback to swap normal pages */
1312 if (split_folio_to_list(folio, folio_list))
1313 goto activate_locked;
1314 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1315 if (nr_pages >= HPAGE_PMD_NR) {
1316 count_memcg_folio_events(folio,
1317 THP_SWPOUT_FALLBACK, 1);
1318 count_vm_event(THP_SWPOUT_FALLBACK);
1319 }
1320 #endif
1321 count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
1322 if (folio_alloc_swap(folio))
1323 goto activate_locked_split;
1324 }
1325 /*
1326 * Normally the folio will be dirtied in unmap because
1327 * its pte should be dirty. A special case is MADV_FREE
1328 * page. The page's pte could have the dirty bit cleared while
1329 * the folio's SwapBacked flag is still set, because clearing
1330 * the dirty bit and the SwapBacked flag is not protected by a
1331 * lock. For such a folio, unmap will not set the dirty bit,
1332 * so folio reclaim will not write the folio out. This can
1333 * cause data corruption when the
1334 * folio is swapped in later. Always setting the dirty
1335 * flag for the folio solves the problem.
1336 */
1337 folio_mark_dirty(folio);
1338 }
1339
1340 /*
1341 * If the folio was split above, the tail pages will make
1342 * their own pass through this function and be accounted
1343 * then.
1344 */
1345 if ((nr_pages > 1) && !folio_test_large(folio)) {
1346 sc->nr_scanned -= (nr_pages - 1);
1347 nr_pages = 1;
1348 }
1349
1350 /*
1351 * The folio is mapped into the page tables of one or more
1352 * processes. Try to unmap it here.
1353 */
1354 if (folio_mapped(folio)) {
1355 enum ttu_flags flags = TTU_BATCH_FLUSH;
1356 bool was_swapbacked = folio_test_swapbacked(folio);
1357
1358 if (folio_test_pmd_mappable(folio))
1359 flags |= TTU_SPLIT_HUGE_PMD;
1360 /*
1361 * Without TTU_SYNC, try_to_unmap will only begin to
1362 * hold PTL from the first present PTE within a large
1363 * folio. Some initial PTEs might be skipped due to
1364 * races with parallel PTE writes in which PTEs can be
1365 * cleared temporarily before being written with new present
1366 * values. This can leave a large folio still
1367 * mapped while some subpages have been partially
1368 * unmapped after try_to_unmap; TTU_SYNC helps
1369 * try_to_unmap acquire PTL from the first PTE,
1370 * eliminating the influence of temporary PTE values.
1371 */
1372 if (folio_test_large(folio))
1373 flags |= TTU_SYNC;
1374
1375 try_to_unmap(folio, flags);
1376 if (folio_mapped(folio)) {
1377 stat->nr_unmap_fail += nr_pages;
1378 if (!was_swapbacked &&
1379 folio_test_swapbacked(folio))
1380 stat->nr_lazyfree_fail += nr_pages;
1381 goto activate_locked;
1382 }
1383 }
1384
1385 /*
1386 * Folio is unmapped now so it cannot be newly pinned anymore.
1387 * No point in trying to reclaim folio if it is pinned.
1388 * Furthermore we don't want to reclaim underlying fs metadata
1389 * if the folio is pinned and thus potentially modified by the
1390 * pinning process as that may upset the filesystem.
1391 */
1392 if (folio_maybe_dma_pinned(folio))
1393 goto activate_locked;
1394
1395 mapping = folio_mapping(folio);
1396 if (folio_test_dirty(folio)) {
1397 if (folio_is_file_lru(folio)) {
1398 /*
1399 * Immediately reclaim when written back.
1400 * Similar in principle to folio_deactivate()
1401 * except we already have the folio isolated
1402 * and know it's dirty
1403 */
1404 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
1405 nr_pages);
1406 if (!folio_test_reclaim(folio))
1407 folio_set_reclaim(folio);
1408
1409 goto activate_locked;
1410 }
1411
1412 if (references == FOLIOREF_RECLAIM_CLEAN)
1413 goto keep_locked;
1414 if (!may_enter_fs(folio, sc->gfp_mask))
1415 goto keep_locked;
1416 if (!sc->may_writepage)
1417 goto keep_locked;
1418
1419 /*
1420 * Folio is dirty. Flush the TLB if a writable entry
1421 * potentially exists to avoid CPU writes after I/O
1422 * starts and then write it out here.
1423 */
1424 try_to_unmap_flush_dirty();
1425 switch (pageout(folio, mapping, &plug, folio_list)) {
1426 case PAGE_KEEP:
1427 goto keep_locked;
1428 case PAGE_ACTIVATE:
1429 /*
1430 * If shmem folio is split when writeback to swap,
1431 * the tail pages will make their own pass through
1432 * this function and be accounted then.
1433 */
1434 if (nr_pages > 1 && !folio_test_large(folio)) {
1435 sc->nr_scanned -= (nr_pages - 1);
1436 nr_pages = 1;
1437 }
1438 goto activate_locked;
1439 case PAGE_SUCCESS:
1440 if (nr_pages > 1 && !folio_test_large(folio)) {
1441 sc->nr_scanned -= (nr_pages - 1);
1442 nr_pages = 1;
1443 }
1444 stat->nr_pageout += nr_pages;
1445
1446 if (folio_test_writeback(folio))
1447 goto keep;
1448 if (folio_test_dirty(folio))
1449 goto keep;
1450
1451 /*
1452 * A synchronous write - probably a ramdisk. Go
1453 * ahead and try to reclaim the folio.
1454 */
1455 if (!folio_trylock(folio))
1456 goto keep;
1457 if (folio_test_dirty(folio) ||
1458 folio_test_writeback(folio))
1459 goto keep_locked;
1460 mapping = folio_mapping(folio);
1461 fallthrough;
1462 case PAGE_CLEAN:
1463 ; /* try to free the folio below */
1464 }
1465 }
1466
1467 /*
1468 * If the folio has buffers, try to free the buffer
1469 * mappings associated with this folio. If we succeed
1470 * we try to free the folio as well.
1471 *
1472 * We do this even if the folio is dirty.
1473 * filemap_release_folio() does not perform I/O, but it
1474 * is possible for a folio to have the dirty flag set,
1475 * but it is actually clean (all its buffers are clean).
1476 * This happens if the buffers were written out directly,
1477 * with submit_bh(). ext3 will do this, as well as
1478 * the blockdev mapping. filemap_release_folio() will
1479 * discover that cleanness and will drop the buffers
1480 * and mark the folio clean - it can be freed.
1481 *
1482 * Rarely, folios can have buffers and no ->mapping.
1483 * These are the folios which were not successfully
1484 * invalidated in truncate_cleanup_folio(). We try to
1485 * drop those buffers here and if that worked, and the
1486 * folio is no longer mapped into process address space
1487 * (refcount == 1) it can be freed. Otherwise, leave
1488 * the folio on the LRU so it is swappable.
1489 */
1490 if (folio_needs_release(folio)) {
1491 if (!filemap_release_folio(folio, sc->gfp_mask))
1492 goto activate_locked;
1493 if (!mapping && folio_ref_count(folio) == 1) {
1494 folio_unlock(folio);
1495 if (folio_put_testzero(folio))
1496 goto free_it;
1497 else {
1498 /*
1499 * rare race with speculative reference.
1500 * the speculative reference will free
1501 * this folio shortly, so we may
1502 * increment nr_reclaimed here (and
1503 * leave it off the LRU).
1504 */
1505 nr_reclaimed += nr_pages;
1506 continue;
1507 }
1508 }
1509 }
1510
1511 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
1512 /* follow __remove_mapping for reference */
1513 if (!folio_ref_freeze(folio, 1))
1514 goto keep_locked;
1515 /*
1516 * The folio has only one reference left, which is
1517 * from the isolation. After the caller puts the
1518 * folio back on the lru and drops the reference, the
1519 * folio will be freed anyway. It doesn't matter
1520 * which lru it goes on. So we don't bother checking
1521 * the dirty flag here.
1522 */
1523 count_vm_events(PGLAZYFREED, nr_pages);
1524 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
1525 } else if (!mapping || !__remove_mapping(mapping, folio, true,
1526 sc->target_mem_cgroup))
1527 goto keep_locked;
1528
1529 folio_unlock(folio);
1530 free_it:
1531 /*
1532 * Folio may get swapped out as a whole, need to account
1533 * all pages in it.
1534 */
1535 nr_reclaimed += nr_pages;
1536
1537 folio_unqueue_deferred_split(folio);
1538 if (folio_batch_add(&free_folios, folio) == 0) {
1539 mem_cgroup_uncharge_folios(&free_folios);
1540 try_to_unmap_flush();
1541 free_unref_folios(&free_folios);
1542 }
1543 continue;
1544
1545 activate_locked_split:
1546 /*
1547 * The tail pages that failed to be added to the swap cache
1548 * reach here. Fix up nr_scanned and nr_pages.
1549 */
1550 if (nr_pages > 1) {
1551 sc->nr_scanned -= (nr_pages - 1);
1552 nr_pages = 1;
1553 }
1554 activate_locked:
1555 /* Not a candidate for swapping, so reclaim swap space. */
1556 if (folio_test_swapcache(folio) &&
1557 (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
1558 folio_free_swap(folio);
1559 VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
1560 if (!folio_test_mlocked(folio)) {
1561 int type = folio_is_file_lru(folio);
1562 folio_set_active(folio);
1563 stat->nr_activate[type] += nr_pages;
1564 count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
1565 }
1566 keep_locked:
1567 folio_unlock(folio);
1568 keep:
1569 list_add(&folio->lru, &ret_folios);
1570 VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
1571 folio_test_unevictable(folio), folio);
1572 }
1573 /* 'folio_list' is always empty here */
1574
1575 /* Migrate folios selected for demotion */
1576 nr_demoted = demote_folio_list(&demote_folios, pgdat, memcg);
1577 nr_reclaimed += nr_demoted;
1578 stat->nr_demoted += nr_demoted;
1579 /* Folios that could not be demoted are still in @demote_folios */
1580 if (!list_empty(&demote_folios)) {
1581 /* Folios which weren't demoted go back on @folio_list */
1582 list_splice_init(&demote_folios, folio_list);
1583
1584 /*
1585 * goto retry to reclaim the undemoted folios in folio_list if
1586 * desired.
1587 *
1588 * Reclaiming directly from top tier nodes is not often desired
1589 * due to it breaking the LRU ordering: in general memory
1590 * should be reclaimed from lower tier nodes and demoted from
1591 * top tier nodes.
1592 *
1593 * However, disabling reclaim from top tier nodes entirely
1594 * would cause ooms in edge scenarios where lower tier memory
1595 * is unreclaimable for whatever reason, eg memory being
1596 * mlocked or too hot to reclaim. We can disable reclaim
1597 * from top tier nodes in proactive reclaim though as that is
1598 * not real memory pressure.
1599 */
1600 if (!sc->proactive) {
1601 do_demote_pass = false;
1602 goto retry;
1603 }
1604 }
1605
1606 pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1607
1608 mem_cgroup_uncharge_folios(&free_folios);
1609 try_to_unmap_flush();
1610 free_unref_folios(&free_folios);
1611
1612 list_splice(&ret_folios, folio_list);
1613 count_vm_events(PGACTIVATE, pgactivate);
1614
1615 if (plug)
1616 swap_write_unplug(plug);
1617 return nr_reclaimed;
1618 }
1619
1620 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1621 struct list_head *folio_list)
1622 {
1623 struct scan_control sc = {
1624 .gfp_mask = GFP_KERNEL,
1625 .may_unmap = 1,
1626 };
1627 struct reclaim_stat stat;
1628 unsigned int nr_reclaimed;
1629 struct folio *folio, *next;
1630 LIST_HEAD(clean_folios);
1631 unsigned int noreclaim_flag;
1632
1633 list_for_each_entry_safe(folio, next, folio_list, lru) {
1634 /* TODO: these pages should not even appear in this list. */
1635 if (page_has_movable_ops(&folio->page))
1636 continue;
1637 if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
1638 !folio_test_dirty(folio) && !folio_test_unevictable(folio)) {
1639 folio_clear_active(folio);
1640 list_move(&folio->lru, &clean_folios);
1641 }
1642 }
1643
1644 /*
1645 * We should be safe here since we are only dealing with file pages and
1646 * we are not kswapd and therefore cannot write dirty file pages. But
1647 * call memalloc_noreclaim_save() anyway, just in case these conditions
1648 * change in the future.
1649 */
1650 noreclaim_flag = memalloc_noreclaim_save();
1651 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
1652 &stat, true, NULL);
1653 memalloc_noreclaim_restore(noreclaim_flag);
1654
1655 list_splice(&clean_folios, folio_list);
1656 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1657 -(long)nr_reclaimed);
1658 /*
1659 * Since lazyfree pages are isolated from file LRU from the beginning,
1660 * they will rotate back to the anonymous LRU in the end if discarding
1661 * fails, so the isolated count will be mismatched.
1662 * Compensate the isolated count for both LRU lists.
1663 */
1664 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
1665 stat.nr_lazyfree_fail);
1666 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
1667 -(long)stat.nr_lazyfree_fail);
1668 return nr_reclaimed;
1669 }
1670
1671 /*
1672 * Update LRU sizes after isolating pages. The LRU size updates must
1673 * be complete before mem_cgroup_update_lru_size due to a sanity check.
1674 */
1675 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1676 enum lru_list lru, unsigned long *nr_zone_taken)
1677 {
1678 int zid;
1679
1680 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1681 if (!nr_zone_taken[zid])
1682 continue;
1683
1684 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1685 }
1686
1687 }
1688
1689 /*
1690  * Isolate up to nr_to_scan folios from the lruvec and move them to the @dst list.
1691 *
1692 * lruvec->lru_lock is heavily contended. Some of the functions that
1693 * shrink the lists perform better by taking out a batch of pages
1694 * and working on them outside the LRU lock.
1695 *
1696 * For pagecache intensive workloads, this function is the hottest
1697 * spot in the kernel (apart from copy_*_user functions).
1698 *
1699  * lruvec->lru_lock must be held before calling this function.
1700 *
1701 * @nr_to_scan: The number of eligible pages to look through on the list.
1702 * @lruvec: The LRU vector to pull pages from.
1703 * @dst: The temp list to put pages on to.
1704 * @nr_scanned: The number of pages that were scanned.
1705 * @sc: The scan_control struct for this reclaim session
1706 * @lru: LRU list id for isolating
1707 *
1708 * returns how many pages were moved onto *@dst.
1709 */
1710 static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
1711 struct lruvec *lruvec, struct list_head *dst,
1712 unsigned long *nr_scanned, struct scan_control *sc,
1713 enum lru_list lru)
1714 {
1715 struct list_head *src = &lruvec->lists[lru];
1716 unsigned long nr_taken = 0;
1717 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1718 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1719 unsigned long skipped = 0, total_scan = 0, scan = 0;
1720 unsigned long nr_pages;
1721 unsigned long max_nr_skipped = 0;
1722 LIST_HEAD(folios_skipped);
1723
1724 while (scan < nr_to_scan && !list_empty(src)) {
1725 struct list_head *move_to = src;
1726 struct folio *folio;
1727
1728 folio = lru_to_folio(src);
1729 prefetchw_prev_lru_folio(folio, src, flags);
1730
1731 nr_pages = folio_nr_pages(folio);
1732 total_scan += nr_pages;
1733
1734		/* Use max_nr_skipped to prevent a hard lockup. */
1735 if (max_nr_skipped < SWAP_CLUSTER_MAX_SKIPPED &&
1736 (folio_zonenum(folio) > sc->reclaim_idx)) {
1737 nr_skipped[folio_zonenum(folio)] += nr_pages;
1738 move_to = &folios_skipped;
1739 max_nr_skipped++;
1740 goto move;
1741 }
1742
1743 /*
1744 * Do not count skipped folios because that makes the function
1745 * return with no isolated folios if the LRU mostly contains
1746 * ineligible folios. This causes the VM to not reclaim any
1747 * folios, triggering a premature OOM.
1748 * Account all pages in a folio.
1749 */
1750 scan += nr_pages;
1751
1752 if (!folio_test_lru(folio))
1753 goto move;
1754 if (!sc->may_unmap && folio_mapped(folio))
1755 goto move;
1756
1757 /*
1758 * Be careful not to clear the lru flag until after we're
1759 * sure the folio is not being freed elsewhere -- the
1760 * folio release code relies on it.
1761 */
1762 if (unlikely(!folio_try_get(folio)))
1763 goto move;
1764
1765 if (!folio_test_clear_lru(folio)) {
1766 /* Another thread is already isolating this folio */
1767 folio_put(folio);
1768 goto move;
1769 }
1770
1771 nr_taken += nr_pages;
1772 nr_zone_taken[folio_zonenum(folio)] += nr_pages;
1773 move_to = dst;
1774 move:
1775 list_move(&folio->lru, move_to);
1776 }
1777
1778 /*
1779 * Splice any skipped folios to the start of the LRU list. Note that
1780 * this disrupts the LRU order when reclaiming for lower zones but
1781 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1782 * scanning would soon rescan the same folios to skip and waste lots
1783 * of cpu cycles.
1784 */
1785 if (!list_empty(&folios_skipped)) {
1786 int zid;
1787
1788 list_splice(&folios_skipped, src);
1789 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1790 if (!nr_skipped[zid])
1791 continue;
1792
1793 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1794 skipped += nr_skipped[zid];
1795 }
1796 }
1797 *nr_scanned = total_scan;
1798 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1799 total_scan, skipped, nr_taken, lru);
1800 update_lru_sizes(lruvec, lru, nr_zone_taken);
1801 return nr_taken;
1802 }
1803
1804 /**
1805 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
1806 * @folio: Folio to isolate from its LRU list.
1807 *
1808 * Isolate a @folio from an LRU list and adjust the vmstat statistic
1809 * corresponding to whatever LRU list the folio was on.
1810 *
1811 * The folio will have its LRU flag cleared. If it was found on the
1812 * active list, it will have the Active flag set. If it was found on the
1813 * unevictable list, it will have the Unevictable flag set. These flags
1814 * may need to be cleared by the caller before letting the page go.
1815 *
1816 * Context:
1817 *
1818 * (1) Must be called with an elevated refcount on the folio. This is a
1819 * fundamental difference from isolate_lru_folios() (which is called
1820 * without a stable reference).
1821 * (2) The lru_lock must not be held.
1822 * (3) Interrupts must be enabled.
1823 *
1824 * Return: true if the folio was removed from an LRU list.
1825 * false if the folio was not on an LRU list.
1826 */
1827 bool folio_isolate_lru(struct folio *folio)
1828 {
1829 bool ret = false;
1830
1831 VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);
1832
1833 if (folio_test_clear_lru(folio)) {
1834 struct lruvec *lruvec;
1835
1836 folio_get(folio);
1837 lruvec = folio_lruvec_lock_irq(folio);
1838 lruvec_del_folio(lruvec, folio);
1839 unlock_page_lruvec_irq(lruvec);
1840 ret = true;
1841 }
1842
1843 return ret;
1844 }
1845
1846 /*
1847 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1848  * then get rescheduled. When there is a massive number of tasks doing page
1849  * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1850  * the LRU lists then shrink and are scanned faster than necessary, leading to
1851  * unnecessary swapping, thrashing and OOM.
1852 */
1853 static bool too_many_isolated(struct pglist_data *pgdat, int file,
1854 struct scan_control *sc)
1855 {
1856 unsigned long inactive, isolated;
1857 bool too_many;
1858
1859 if (current_is_kswapd())
1860 return false;
1861
1862 if (!writeback_throttling_sane(sc))
1863 return false;
1864
1865 if (file) {
1866 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1867 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1868 } else {
1869 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1870 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1871 }
1872
1873 /*
1874	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so that
1875	 * they won't get blocked by normal direct reclaimers and end up forming
1876	 * a circular deadlock.
1877 */
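	/*
	 * Illustrative numbers (hypothetical, not taken from this code): with
	 * 80,000 inactive pages on the relevant LRU, a GFP_KERNEL direct
	 * reclaimer is throttled once more than 10,000 pages (inactive >> 3)
	 * are isolated, while a GFP_NOFS reclaimer is only throttled once the
	 * isolated pages exceed the full 80,000.
	 */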
1878 if (gfp_has_io_fs(sc->gfp_mask))
1879 inactive >>= 3;
1880
1881 too_many = isolated > inactive;
1882
1883 /* Wake up tasks throttled due to too_many_isolated. */
1884 if (!too_many)
1885 wake_throttle_isolated(pgdat);
1886
1887 return too_many;
1888 }
1889
1890 /*
1891  * move_folios_to_lru() moves folios from the private @list to the appropriate LRU lists.
1892 *
1893 * Returns the number of pages moved to the given lruvec.
1894 */
1895 static unsigned int move_folios_to_lru(struct lruvec *lruvec,
1896 struct list_head *list)
1897 {
1898 int nr_pages, nr_moved = 0;
1899 struct folio_batch free_folios;
1900
1901 folio_batch_init(&free_folios);
1902 while (!list_empty(list)) {
1903 struct folio *folio = lru_to_folio(list);
1904
1905 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
1906 list_del(&folio->lru);
1907 if (unlikely(!folio_evictable(folio))) {
1908 spin_unlock_irq(&lruvec->lru_lock);
1909 folio_putback_lru(folio);
1910 spin_lock_irq(&lruvec->lru_lock);
1911 continue;
1912 }
1913
1914 /*
1915 * The folio_set_lru needs to be kept here for list integrity.
1916 * Otherwise:
1917 * #0 move_folios_to_lru #1 release_pages
1918 * if (!folio_put_testzero())
1919 * if (folio_put_testzero())
1920 * !lru //skip lru_lock
1921 * folio_set_lru()
1922 * list_add(&folio->lru,)
1923 * list_add(&folio->lru,)
1924 */
1925 folio_set_lru(folio);
1926
1927 if (unlikely(folio_put_testzero(folio))) {
1928 __folio_clear_lru_flags(folio);
1929
1930 folio_unqueue_deferred_split(folio);
1931 if (folio_batch_add(&free_folios, folio) == 0) {
1932 spin_unlock_irq(&lruvec->lru_lock);
1933 mem_cgroup_uncharge_folios(&free_folios);
1934 free_unref_folios(&free_folios);
1935 spin_lock_irq(&lruvec->lru_lock);
1936 }
1937
1938 continue;
1939 }
1940
1941 /*
1942 * All pages were isolated from the same lruvec (and isolation
1943 * inhibits memcg migration).
1944 */
1945 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
1946 lruvec_add_folio(lruvec, folio);
1947 nr_pages = folio_nr_pages(folio);
1948 nr_moved += nr_pages;
1949 if (folio_test_active(folio))
1950 workingset_age_nonresident(lruvec, nr_pages);
1951 }
1952
1953 if (free_folios.nr) {
1954 spin_unlock_irq(&lruvec->lru_lock);
1955 mem_cgroup_uncharge_folios(&free_folios);
1956 free_unref_folios(&free_folios);
1957 spin_lock_irq(&lruvec->lru_lock);
1958 }
1959
1960 return nr_moved;
1961 }
1962
1963 /*
1964 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
1965 * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case
1966 * we should not throttle. Otherwise it is safe to do so.
1967 */
1968 static int current_may_throttle(void)
1969 {
1970 return !(current->flags & PF_LOCAL_THROTTLE);
1971 }
1972
1973 /*
1974 * shrink_inactive_list() is a helper for shrink_node(). It returns the number
1975 * of reclaimed pages
1976 */
1977 static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
1978 struct lruvec *lruvec, struct scan_control *sc,
1979 enum lru_list lru)
1980 {
1981 LIST_HEAD(folio_list);
1982 unsigned long nr_scanned;
1983 unsigned int nr_reclaimed = 0;
1984 unsigned long nr_taken;
1985 struct reclaim_stat stat;
1986 bool file = is_file_lru(lru);
1987 enum vm_event_item item;
1988 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1989 bool stalled = false;
1990
1991 while (unlikely(too_many_isolated(pgdat, file, sc))) {
1992 if (stalled)
1993 return 0;
1994
1995 /* wait a bit for the reclaimer. */
1996 stalled = true;
1997 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
1998
1999 /* We are about to die and free our memory. Return now. */
2000 if (fatal_signal_pending(current))
2001 return SWAP_CLUSTER_MAX;
2002 }
2003
2004 lru_add_drain();
2005
2006 spin_lock_irq(&lruvec->lru_lock);
2007
2008 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
2009 &nr_scanned, sc, lru);
2010
2011 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2012 item = PGSCAN_KSWAPD + reclaimer_offset(sc);
2013 if (!cgroup_reclaim(sc))
2014 __count_vm_events(item, nr_scanned);
2015 count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
2016 __count_vm_events(PGSCAN_ANON + file, nr_scanned);
2017
2018 spin_unlock_irq(&lruvec->lru_lock);
2019
2020 if (nr_taken == 0)
2021 return 0;
2022
2023 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false,
2024 lruvec_memcg(lruvec));
2025
2026 spin_lock_irq(&lruvec->lru_lock);
2027 move_folios_to_lru(lruvec, &folio_list);
2028
2029 mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
2030 stat.nr_demoted);
2031 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2032 item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
2033 if (!cgroup_reclaim(sc))
2034 __count_vm_events(item, nr_reclaimed);
2035 count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2036 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
2037
2038 lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
2039 nr_scanned - nr_reclaimed);
2040
2041 /*
2042 * If dirty folios are scanned that are not queued for IO, it
2043 * implies that flushers are not doing their job. This can
2044 * happen when memory pressure pushes dirty folios to the end of
2045 * the LRU before the dirty limits are breached and the dirty
2046 * data has expired. It can also happen when the proportion of
2047 * dirty folios grows not through writes but through memory
2048 * pressure reclaiming all the clean cache. And in some cases,
2049 * the flushers simply cannot keep up with the allocation
2050 * rate. Nudge the flusher threads in case they are asleep.
2051 */
2052 if (stat.nr_unqueued_dirty == nr_taken) {
2053 wakeup_flusher_threads(WB_REASON_VMSCAN);
2054 /*
2055		 * For cgroupv1, dirty throttling is achieved by waking up
2056		 * the kernel flusher here and later waiting on folios
2057		 * which are in writeback to finish (see shrink_folio_list()).
2058		 *
2059		 * The flusher may not be able to issue writeback quickly
2060		 * enough for cgroupv1 writeback throttling to work
2061		 * on a large system.
2062 */
2063 if (!writeback_throttling_sane(sc))
2064 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
2065 }
2066
2067 sc->nr.dirty += stat.nr_dirty;
2068 sc->nr.congested += stat.nr_congested;
2069 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2070 sc->nr.writeback += stat.nr_writeback;
2071 sc->nr.immediate += stat.nr_immediate;
2072 sc->nr.taken += nr_taken;
2073 if (file)
2074 sc->nr.file_taken += nr_taken;
2075
2076 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2077 nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2078 return nr_reclaimed;
2079 }
2080
2081 /*
2082 * shrink_active_list() moves folios from the active LRU to the inactive LRU.
2083 *
2084 * We move them the other way if the folio is referenced by one or more
2085 * processes.
2086 *
2087 * If the folios are mostly unmapped, the processing is fast and it is
2088 * appropriate to hold lru_lock across the whole operation. But if
2089 * the folios are mapped, the processing is slow (folio_referenced()), so
2090 * we should drop lru_lock around each folio. It's impossible to balance
2091 * this, so instead we remove the folios from the LRU while processing them.
2092 * It is safe to rely on the active flag against the non-LRU folios in here
2093 * because nobody will play with that bit on a non-LRU folio.
2094 *
2095 * The downside is that we have to touch folio->_refcount against each folio.
2096 * But we had to alter folio->flags anyway.
2097 */
2098 static void shrink_active_list(unsigned long nr_to_scan,
2099 struct lruvec *lruvec,
2100 struct scan_control *sc,
2101 enum lru_list lru)
2102 {
2103 unsigned long nr_taken;
2104 unsigned long nr_scanned;
2105 vm_flags_t vm_flags;
2106 LIST_HEAD(l_hold); /* The folios which were snipped off */
2107 LIST_HEAD(l_active);
2108 LIST_HEAD(l_inactive);
2109 unsigned nr_deactivate, nr_activate;
2110 unsigned nr_rotated = 0;
2111 bool file = is_file_lru(lru);
2112 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2113
2114 lru_add_drain();
2115
2116 spin_lock_irq(&lruvec->lru_lock);
2117
2118 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
2119 &nr_scanned, sc, lru);
2120
2121 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2122
2123 if (!cgroup_reclaim(sc))
2124 __count_vm_events(PGREFILL, nr_scanned);
2125 count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2126
2127 spin_unlock_irq(&lruvec->lru_lock);
2128
2129 while (!list_empty(&l_hold)) {
2130 struct folio *folio;
2131
2132 cond_resched();
2133 folio = lru_to_folio(&l_hold);
2134 list_del(&folio->lru);
2135
2136 if (unlikely(!folio_evictable(folio))) {
2137 folio_putback_lru(folio);
2138 continue;
2139 }
2140
2141 if (unlikely(buffer_heads_over_limit)) {
2142 if (folio_needs_release(folio) &&
2143 folio_trylock(folio)) {
2144 filemap_release_folio(folio, 0);
2145 folio_unlock(folio);
2146 }
2147 }
2148
2149 /* Referenced or rmap lock contention: rotate */
2150 if (folio_referenced(folio, 0, sc->target_mem_cgroup,
2151 &vm_flags) != 0) {
2152 /*
2153			 * Identify referenced, file-backed active folios and
2154			 * give them one more trip around the active list, so
2155			 * that executable code gets a better chance to stay in
2156			 * memory under moderate memory pressure. Anon folios
2157 * are not likely to be evicted by use-once streaming
2158 * IO, plus JVM can create lots of anon VM_EXEC folios,
2159 * so we ignore them here.
2160 */
2161 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
2162 nr_rotated += folio_nr_pages(folio);
2163 list_add(&folio->lru, &l_active);
2164 continue;
2165 }
2166 }
2167
2168 folio_clear_active(folio); /* we are de-activating */
2169 folio_set_workingset(folio);
2170 list_add(&folio->lru, &l_inactive);
2171 }
2172
2173 /*
2174 * Move folios back to the lru list.
2175 */
2176 spin_lock_irq(&lruvec->lru_lock);
2177
2178 nr_activate = move_folios_to_lru(lruvec, &l_active);
2179 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
2180
2181 __count_vm_events(PGDEACTIVATE, nr_deactivate);
2182 count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2183
2184 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2185
2186 lru_note_cost_unlock_irq(lruvec, file, 0, nr_rotated);
2187 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2188 nr_deactivate, nr_rotated, sc->priority, file);
2189 }
2190
2191 static unsigned int reclaim_folio_list(struct list_head *folio_list,
2192 struct pglist_data *pgdat)
2193 {
2194 struct reclaim_stat stat;
2195 unsigned int nr_reclaimed;
2196 struct folio *folio;
2197 struct scan_control sc = {
2198 .gfp_mask = GFP_KERNEL,
2199 .may_writepage = 1,
2200 .may_unmap = 1,
2201 .may_swap = 1,
2202 .no_demotion = 1,
2203 };
2204
2205 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &stat, true, NULL);
2206 while (!list_empty(folio_list)) {
2207 folio = lru_to_folio(folio_list);
2208 list_del(&folio->lru);
2209 folio_putback_lru(folio);
2210 }
2211 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat);
2212
2213 return nr_reclaimed;
2214 }
2215
2216 unsigned long reclaim_pages(struct list_head *folio_list)
2217 {
2218 int nid;
2219 unsigned int nr_reclaimed = 0;
2220 LIST_HEAD(node_folio_list);
2221 unsigned int noreclaim_flag;
2222
2223 if (list_empty(folio_list))
2224 return nr_reclaimed;
2225
2226 noreclaim_flag = memalloc_noreclaim_save();
2227
2228 nid = folio_nid(lru_to_folio(folio_list));
2229 do {
2230 struct folio *folio = lru_to_folio(folio_list);
2231
2232 if (nid == folio_nid(folio)) {
2233 folio_clear_active(folio);
2234 list_move(&folio->lru, &node_folio_list);
2235 continue;
2236 }
2237
2238 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
2239 nid = folio_nid(lru_to_folio(folio_list));
2240 } while (!list_empty(folio_list));
2241
2242 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
2243
2244 memalloc_noreclaim_restore(noreclaim_flag);
2245
2246 return nr_reclaimed;
2247 }
2248
2249 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2250 struct lruvec *lruvec, struct scan_control *sc)
2251 {
2252 if (is_active_lru(lru)) {
2253 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2254 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2255 else
2256 sc->skipped_deactivate = 1;
2257 return 0;
2258 }
2259
2260 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2261 }
2262
2263 /*
2264 * The inactive anon list should be small enough that the VM never has
2265 * to do too much work.
2266 *
2267 * The inactive file list should be small enough to leave most memory
2268 * to the established workingset on the scan-resistant active list,
2269 * but large enough to avoid thrashing the aggregate readahead window.
2270 *
2271 * Both inactive lists should also be large enough that each inactive
2272 * folio has a chance to be referenced again before it is reclaimed.
2273 *
2274 * If that fails and refaulting is observed, the inactive list grows.
2275 *
2276 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios
2277 * on this LRU, maintained by the pageout code. An inactive_ratio
2278 * of 3 means 3:1 or 25% of the folios are kept on the inactive list.
2279 *
2280 * total target max
2281 * memory ratio inactive
2282 * -------------------------------------
2283 * 10MB 1 5MB
2284 * 100MB 1 50MB
2285 * 1GB 3 250MB
2286 * 10GB 10 0.9GB
2287 * 100GB 31 3GB
2288 * 1TB 101 10GB
2289 * 10TB 320 32GB
2290 */
2291 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2292 {
2293 enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2294 unsigned long inactive, active;
2295 unsigned long inactive_ratio;
2296 unsigned long gb;
2297
2298 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2299 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2300
2301 gb = (inactive + active) >> (30 - PAGE_SHIFT);
2302 if (gb)
2303 inactive_ratio = int_sqrt(10 * gb);
2304 else
2305 inactive_ratio = 1;
2306
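	/*
	 * Worked example matching the table above: with 100GB of pages on
	 * this LRU pair, gb = 100 and inactive_ratio = int_sqrt(1000) = 31,
	 * so the inactive list is considered low once it falls below roughly
	 * 1/32 of the total, i.e. ~3GB.
	 */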
2307 return inactive * inactive_ratio < active;
2308 }
2309
2310 enum scan_balance {
2311 SCAN_EQUAL,
2312 SCAN_FRACT,
2313 SCAN_ANON,
2314 SCAN_FILE,
2315 };
2316
2317 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
2318 {
2319 unsigned long file;
2320 struct lruvec *target_lruvec;
2321
2322 if (lru_gen_enabled())
2323 return;
2324
2325 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2326
2327 /*
2328	 * Flush the memory cgroup stats in a rate-limited way as we don't need
2329	 * the most accurate stats here. We may switch to regular stats flushing
2330 * in the future once it is cheap enough.
2331 */
2332 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup);
2333
2334 /*
2335 * Determine the scan balance between anon and file LRUs.
2336 */
2337 spin_lock_irq(&target_lruvec->lru_lock);
2338 sc->anon_cost = target_lruvec->anon_cost;
2339 sc->file_cost = target_lruvec->file_cost;
2340 spin_unlock_irq(&target_lruvec->lru_lock);
2341
2342 /*
2343 * Target desirable inactive:active list ratios for the anon
2344 * and file LRU lists.
2345 */
2346 if (!sc->force_deactivate) {
2347 unsigned long refaults;
2348
2349 /*
2350 * When refaults are being observed, it means a new
2351 * workingset is being established. Deactivate to get
2352 * rid of any stale active pages quickly.
2353 */
2354 refaults = lruvec_page_state(target_lruvec,
2355 WORKINGSET_ACTIVATE_ANON);
2356 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] ||
2357 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2358 sc->may_deactivate |= DEACTIVATE_ANON;
2359 else
2360 sc->may_deactivate &= ~DEACTIVATE_ANON;
2361
2362 refaults = lruvec_page_state(target_lruvec,
2363 WORKINGSET_ACTIVATE_FILE);
2364 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] ||
2365 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2366 sc->may_deactivate |= DEACTIVATE_FILE;
2367 else
2368 sc->may_deactivate &= ~DEACTIVATE_FILE;
2369 } else
2370 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2371
2372 /*
2373 * If we have plenty of inactive file pages that aren't
2374 * thrashing, try to reclaim those first before touching
2375 * anonymous pages.
2376 */
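	/*
	 * "Plenty" here means file >> sc->priority is non-zero, i.e. at
	 * least 2^priority inactive file pages. Assuming the usual starting
	 * priority of 12 and 4KB pages, that is 4096 pages or roughly 16MB.
	 */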
2377 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2378 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) &&
2379 !sc->no_cache_trim_mode)
2380 sc->cache_trim_mode = 1;
2381 else
2382 sc->cache_trim_mode = 0;
2383
2384 /*
2385 * Prevent the reclaimer from falling into the cache trap: as
2386 * cache pages start out inactive, every cache fault will tip
2387 * the scan balance towards the file LRU. And as the file LRU
2388 * shrinks, so does the window for rotation from references.
2389 * This means we have a runaway feedback loop where a tiny
2390 * thrashing file LRU becomes infinitely more attractive than
2391 * anon pages. Try to detect this based on file LRU size.
2392 */
2393 if (!cgroup_reclaim(sc)) {
2394 unsigned long total_high_wmark = 0;
2395 unsigned long free, anon;
2396 int z;
2397 struct zone *zone;
2398
2399 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2400 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2401 node_page_state(pgdat, NR_INACTIVE_FILE);
2402
2403 for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) {
2404 total_high_wmark += high_wmark_pages(zone);
2405 }
2406
2407 /*
2408 * Consider anon: if that's low too, this isn't a
2409 * runaway file reclaim problem, but rather just
2410 * extreme pressure. Reclaim as per usual then.
2411 */
2412 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2413
2414 sc->file_is_tiny =
2415 file + free <= total_high_wmark &&
2416 !(sc->may_deactivate & DEACTIVATE_ANON) &&
2417 anon >> sc->priority;
2418 }
2419 }
2420
2421 static inline void calculate_pressure_balance(struct scan_control *sc,
2422 int swappiness, u64 *fraction, u64 *denominator)
2423 {
2424 unsigned long anon_cost, file_cost, total_cost;
2425 unsigned long ap, fp;
2426
2427 /*
2428 * Calculate the pressure balance between anon and file pages.
2429 *
2430 * The amount of pressure we put on each LRU is inversely
2431 * proportional to the cost of reclaiming each list, as
2432 * determined by the share of pages that are refaulting, times
2433 * the relative IO cost of bringing back a swapped out
2434 * anonymous page vs reloading a filesystem page (swappiness).
2435 *
2436 * Although we limit that influence to ensure no list gets
2437 * left behind completely: at least a third of the pressure is
2438 * applied, before swappiness.
2439 *
2440 * With swappiness at 100, anon and file have equal IO cost.
2441 */
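	/*
	 * Illustrative arithmetic (hypothetical costs): with sc->anon_cost == 0
	 * and sc->file_cost == C, the sums below become anon_cost = C and
	 * file_cost = 2C against a combined cost of 3C, so the anon LRU still
	 * receives one third of the cost-based pressure before swappiness is
	 * factored in; the same floor holds symmetrically for the file LRU.
	 */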
2442 total_cost = sc->anon_cost + sc->file_cost;
2443 anon_cost = total_cost + sc->anon_cost;
2444 file_cost = total_cost + sc->file_cost;
2445 total_cost = anon_cost + file_cost;
2446
2447 ap = swappiness * (total_cost + 1);
2448 ap /= anon_cost + 1;
2449
2450 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1);
2451 fp /= file_cost + 1;
2452
2453 fraction[WORKINGSET_ANON] = ap;
2454 fraction[WORKINGSET_FILE] = fp;
2455 *denominator = ap + fp;
2456 }
2457
2458 static unsigned long apply_proportional_protection(struct mem_cgroup *memcg,
2459 struct scan_control *sc, unsigned long scan)
2460 {
2461 unsigned long min, low, usage;
2462
2463 mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low, &usage);
2464
2465 if (min || low) {
2466 /*
2467 * Scale a cgroup's reclaim pressure by proportioning
2468 * its current usage to its memory.low or memory.min
2469 * setting.
2470 *
2471 * This is important, as otherwise scanning aggression
2472 * becomes extremely binary -- from nothing as we
2473 * approach the memory protection threshold, to totally
2474 * nominal as we exceed it. This results in requiring
2475 * setting extremely liberal protection thresholds. It
2476 * also means we simply get no protection at all if we
2477 * set it too low, which is not ideal.
2478 *
2479 * If there is any protection in place, we reduce scan
2480 * pressure by how much of the total memory used is
2481 * within protection thresholds.
2482 *
2483 * There is one special case: in the first reclaim pass,
2484 * we skip over all groups that are within their low
2485 * protection. If that fails to reclaim enough pages to
2486 * satisfy the reclaim goal, we come back and override
2487 * the best-effort low protection. However, we still
2488 * ideally want to honor how well-behaved groups are in
2489 * that case instead of simply punishing them all
2490 * equally. As such, we reclaim them based on how much
2491 * memory they are using, reducing the scan pressure
2492 * again by how much of the total memory used is under
2493 * hard protection.
2494 */
2495 unsigned long protection;
2496
2497 /* memory.low scaling, make sure we retry before OOM */
2498 if (!sc->memcg_low_reclaim && low > min) {
2499 protection = low;
2500 sc->memcg_low_skipped = 1;
2501 } else {
2502 protection = min;
2503 }
2504
2505 /* Avoid TOCTOU with earlier protection check */
2506 usage = max(usage, protection);
2507
2508 scan -= scan * protection / (usage + 1);
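		/*
		 * Worked example (hypothetical values): with usage = 1000
		 * pages, protection = 750 and scan = 512, the line above
		 * reduces scan by 512 * 750 / 1001 = 383, leaving 129 pages
		 * to scan, i.e. pressure is scaled down by roughly the
		 * protected share of the cgroup's usage.
		 */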
2509
2510 /*
2511 * Minimally target SWAP_CLUSTER_MAX pages to keep
2512 * reclaim moving forwards, avoiding decrementing
2513 * sc->priority further than desirable.
2514 */
2515 scan = max(scan, SWAP_CLUSTER_MAX);
2516 }
2517 return scan;
2518 }
2519
2520 /*
2521 * Determine how aggressively the anon and file LRU lists should be
2522 * scanned.
2523 *
2524 * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan
2525 * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan
2526 */
2527 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2528 unsigned long *nr)
2529 {
2530 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2531 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2532 int swappiness = sc_swappiness(sc, memcg);
2533 u64 fraction[ANON_AND_FILE];
2534	u64 denominator = 0;	/* gcc: silence a maybe-uninitialized warning */
2535 enum scan_balance scan_balance;
2536 enum lru_list lru;
2537
2538 /* If we have no swap space, do not bother scanning anon folios. */
2539 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
2540 scan_balance = SCAN_FILE;
2541 goto out;
2542 }
2543
2544 /*
2545 * Global reclaim will swap to prevent OOM even with no
2546 * swappiness, but memcg users want to use this knob to
2547 * disable swapping for individual groups completely when
2548 * using the memory controller's swap limit feature would be
2549 * too expensive.
2550 */
2551 if (cgroup_reclaim(sc) && !swappiness) {
2552 scan_balance = SCAN_FILE;
2553 goto out;
2554 }
2555
2556 /* Proactive reclaim initiated by userspace for anonymous memory only */
2557 if (swappiness == SWAPPINESS_ANON_ONLY) {
2558 WARN_ON_ONCE(!sc->proactive);
2559 scan_balance = SCAN_ANON;
2560 goto out;
2561 }
2562
2563 /*
2564 * Do not apply any pressure balancing cleverness when the
2565 * system is close to OOM, scan both anon and file equally
2566 * (unless the swappiness setting disagrees with swapping).
2567 */
2568 if (!sc->priority && swappiness) {
2569 scan_balance = SCAN_EQUAL;
2570 goto out;
2571 }
2572
2573 /*
2574 * If the system is almost out of file pages, force-scan anon.
2575 */
2576 if (sc->file_is_tiny) {
2577 scan_balance = SCAN_ANON;
2578 goto out;
2579 }
2580
2581 /*
2582 * If there is enough inactive page cache, we do not reclaim
2583	 * anything from the anonymous working set right now to make sure
2584 * a streaming file access pattern doesn't cause swapping.
2585 */
2586 if (sc->cache_trim_mode) {
2587 scan_balance = SCAN_FILE;
2588 goto out;
2589 }
2590
2591 scan_balance = SCAN_FRACT;
2592 calculate_pressure_balance(sc, swappiness, fraction, &denominator);
2593
2594 out:
2595 for_each_evictable_lru(lru) {
2596 bool file = is_file_lru(lru);
2597 unsigned long lruvec_size;
2598 unsigned long scan;
2599
2600 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2601 scan = apply_proportional_protection(memcg, sc, lruvec_size);
2602 scan >>= sc->priority;
2603
2604 /*
2605 * If the cgroup's already been deleted, make sure to
2606 * scrape out the remaining cache.
2607 */
2608 if (!scan && !mem_cgroup_online(memcg))
2609 scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2610
2611 switch (scan_balance) {
2612 case SCAN_EQUAL:
2613 /* Scan lists relative to size */
2614 break;
2615 case SCAN_FRACT:
2616 /*
2617 * Scan types proportional to swappiness and
2618 * their relative recent reclaim efficiency.
2619 * Make sure we don't miss the last page on
2620 * the offlined memory cgroups because of a
2621 * round-off error.
2622 */
2623 scan = mem_cgroup_online(memcg) ?
2624 div64_u64(scan * fraction[file], denominator) :
2625 DIV64_U64_ROUND_UP(scan * fraction[file],
2626 denominator);
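			/*
			 * Illustrative split (hypothetical values): with
			 * fraction = {1, 3}, denominator = 4 and scan = 100,
			 * the anon LRU gets 25 and the file LRU 75; for an
			 * offlined memcg the round-up variant keeps a last
			 * straggler page from being skipped forever.
			 */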
2627 break;
2628 case SCAN_FILE:
2629 case SCAN_ANON:
2630 /* Scan one type exclusively */
2631 if ((scan_balance == SCAN_FILE) != file)
2632 scan = 0;
2633 break;
2634 default:
2635 /* Look ma, no brain */
2636 BUG();
2637 }
2638
2639 nr[lru] = scan;
2640 }
2641 }
2642
2643 /*
2644 * Anonymous LRU management is a waste if there is
2645 * ultimately no way to reclaim the memory.
2646 */
2647 static bool can_age_anon_pages(struct lruvec *lruvec,
2648 struct scan_control *sc)
2649 {
2650 /* Aging the anon LRU is valuable if swap is present: */
2651 if (total_swap_pages > 0)
2652 return true;
2653
2654 /* Also valuable if anon pages can be demoted: */
2655 return can_demote(lruvec_pgdat(lruvec)->node_id, sc,
2656 lruvec_memcg(lruvec));
2657 }
2658
2659 #ifdef CONFIG_LRU_GEN
2660
2661 #ifdef CONFIG_LRU_GEN_ENABLED
2662 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
2663 #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
2664 #else
2665 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
2666 #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
2667 #endif
2668
2669 static bool should_walk_mmu(void)
2670 {
2671 return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK);
2672 }
2673
2674 static bool should_clear_pmd_young(void)
2675 {
2676 return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG);
2677 }
2678
2679 /******************************************************************************
2680 * shorthand helpers
2681 ******************************************************************************/
2682
2683 #define DEFINE_MAX_SEQ(lruvec) \
2684 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2685
2686 #define DEFINE_MIN_SEQ(lruvec) \
2687 unsigned long min_seq[ANON_AND_FILE] = { \
2688 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
2689 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
2690 }
2691
2692 /* Get the min/max evictable type based on swappiness */
2693 #define min_type(swappiness) (!(swappiness))
2694 #define max_type(swappiness) ((swappiness) < SWAPPINESS_ANON_ONLY)
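/*
 * For example: swappiness == 0 makes both min_type and max_type resolve to
 * the file type (LRU_GEN_FILE), so only file folios are evictable;
 * SWAPPINESS_ANON_ONLY makes both resolve to the anon type (LRU_GEN_ANON);
 * any other value covers both types.
 */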
2695
2696 #define evictable_min_seq(min_seq, swappiness) \
2697 min((min_seq)[min_type(swappiness)], (min_seq)[max_type(swappiness)])
2698
2699 #define for_each_gen_type_zone(gen, type, zone) \
2700 for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
2701 for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
2702 for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
2703
2704 #define for_each_evictable_type(type, swappiness) \
2705 for ((type) = min_type(swappiness); (type) <= max_type(swappiness); (type)++)
2706
2707 #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS)
2708 #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS)
2709
2710 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
2711 {
2712 struct pglist_data *pgdat = NODE_DATA(nid);
2713
2714 #ifdef CONFIG_MEMCG
2715 if (memcg) {
2716 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
2717
2718 /* see the comment in mem_cgroup_lruvec() */
2719 if (!lruvec->pgdat)
2720 lruvec->pgdat = pgdat;
2721
2722 return lruvec;
2723 }
2724 #endif
2725 VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2726
2727 return &pgdat->__lruvec;
2728 }
2729
2730 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
2731 {
2732 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2733 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2734
2735 if (!sc->may_swap)
2736 return 0;
2737
2738 if (!can_demote(pgdat->node_id, sc, memcg) &&
2739 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
2740 return 0;
2741
2742 return sc_swappiness(sc, memcg);
2743 }
2744
2745 static int get_nr_gens(struct lruvec *lruvec, int type)
2746 {
2747 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
2748 }
2749
2750 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
2751 {
2752 int type;
2753
2754 for (type = 0; type < ANON_AND_FILE; type++) {
2755 int n = get_nr_gens(lruvec, type);
2756
2757 if (n < MIN_NR_GENS || n > MAX_NR_GENS)
2758 return false;
2759 }
2760
2761 return true;
2762 }
2763
2764 /******************************************************************************
2765 * Bloom filters
2766 ******************************************************************************/
2767
2768 /*
2769  * Bloom filters with m=1<<15, k=2 and false positive rates of ~1/5 when
2770 * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
2771 * bits in a bitmap, k is the number of hash functions and n is the number of
2772 * inserted items.
2773 *
2774 * Page table walkers use one of the two filters to reduce their search space.
2775 * To get rid of non-leaf entries that no longer have enough leaf entries, the
2776 * aging uses the double-buffering technique to flip to the other filter each
2777 * time it produces a new generation. For non-leaf entries that have enough
2778 * leaf entries, the aging carries them over to the next generation in
2779  * walk_pmd_range(); the eviction also reports them when walking the rmap
2780 * in lru_gen_look_around().
2781 *
2782 * For future optimizations:
2783 * 1. It's not necessary to keep both filters all the time. The spare one can be
2784 * freed after the RCU grace period and reallocated if needed again.
2785 * 2. And when reallocating, it's worth scaling its size according to the number
2786 * of inserted entries in the other filter, to reduce the memory overhead on
2787 * small systems and false positives on large systems.
2788 * 3. Jenkins' hash function is an alternative to Knuth's.
2789 */
2790 #define BLOOM_FILTER_SHIFT 15
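/*
 * With BLOOM_FILTER_SHIFT == 15, each filter is a bitmap of BIT(15) = 32768
 * bits (4KB), and get_item_key() below splits a 30-bit hash into the two
 * 15-bit bit indices used as keys.
 */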
2791
2792 static inline int filter_gen_from_seq(unsigned long seq)
2793 {
2794 return seq % NR_BLOOM_FILTERS;
2795 }
2796
2797 static void get_item_key(void *item, int *key)
2798 {
2799 u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
2800
2801 BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
2802
2803 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
2804 key[1] = hash >> BLOOM_FILTER_SHIFT;
2805 }
2806
2807 static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
2808 void *item)
2809 {
2810 int key[2];
2811 unsigned long *filter;
2812 int gen = filter_gen_from_seq(seq);
2813
2814 filter = READ_ONCE(mm_state->filters[gen]);
2815 if (!filter)
2816 return true;
2817
2818 get_item_key(item, key);
2819
2820 return test_bit(key[0], filter) && test_bit(key[1], filter);
2821 }
2822
2823 static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
2824 void *item)
2825 {
2826 int key[2];
2827 unsigned long *filter;
2828 int gen = filter_gen_from_seq(seq);
2829
2830 filter = READ_ONCE(mm_state->filters[gen]);
2831 if (!filter)
2832 return;
2833
2834 get_item_key(item, key);
2835
2836 if (!test_bit(key[0], filter))
2837 set_bit(key[0], filter);
2838 if (!test_bit(key[1], filter))
2839 set_bit(key[1], filter);
2840 }
2841
2842 static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq)
2843 {
2844 unsigned long *filter;
2845 int gen = filter_gen_from_seq(seq);
2846
2847 filter = mm_state->filters[gen];
2848 if (filter) {
2849 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
2850 return;
2851 }
2852
2853 filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
2854 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
2855 WRITE_ONCE(mm_state->filters[gen], filter);
2856 }
2857
2858 /******************************************************************************
2859 * mm_struct list
2860 ******************************************************************************/
2861
2862 #ifdef CONFIG_LRU_GEN_WALKS_MMU
2863
2864 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
2865 {
2866 static struct lru_gen_mm_list mm_list = {
2867 .fifo = LIST_HEAD_INIT(mm_list.fifo),
2868 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
2869 };
2870
2871 #ifdef CONFIG_MEMCG
2872 if (memcg)
2873 return &memcg->mm_list;
2874 #endif
2875 VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2876
2877 return &mm_list;
2878 }
2879
2880 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
2881 {
2882 return &lruvec->mm_state;
2883 }
2884
2885 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
2886 {
2887 int key;
2888 struct mm_struct *mm;
2889 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
2890 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
2891
2892 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
2893 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
2894
2895 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
2896 return NULL;
2897
2898 clear_bit(key, &mm->lru_gen.bitmap);
2899
2900 return mmget_not_zero(mm) ? mm : NULL;
2901 }
2902
2903 void lru_gen_add_mm(struct mm_struct *mm)
2904 {
2905 int nid;
2906 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
2907 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2908
2909 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
2910 #ifdef CONFIG_MEMCG
2911 VM_WARN_ON_ONCE(mm->lru_gen.memcg);
2912 mm->lru_gen.memcg = memcg;
2913 #endif
2914 spin_lock(&mm_list->lock);
2915
2916 for_each_node_state(nid, N_MEMORY) {
2917 struct lruvec *lruvec = get_lruvec(memcg, nid);
2918 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
2919
2920 /* the first addition since the last iteration */
2921 if (mm_state->tail == &mm_list->fifo)
2922 mm_state->tail = &mm->lru_gen.list;
2923 }
2924
2925 list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
2926
2927 spin_unlock(&mm_list->lock);
2928 }
2929
2930 void lru_gen_del_mm(struct mm_struct *mm)
2931 {
2932 int nid;
2933 struct lru_gen_mm_list *mm_list;
2934 struct mem_cgroup *memcg = NULL;
2935
2936 if (list_empty(&mm->lru_gen.list))
2937 return;
2938
2939 #ifdef CONFIG_MEMCG
2940 memcg = mm->lru_gen.memcg;
2941 #endif
2942 mm_list = get_mm_list(memcg);
2943
2944 spin_lock(&mm_list->lock);
2945
2946 for_each_node(nid) {
2947 struct lruvec *lruvec = get_lruvec(memcg, nid);
2948 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
2949
2950 /* where the current iteration continues after */
2951 if (mm_state->head == &mm->lru_gen.list)
2952 mm_state->head = mm_state->head->prev;
2953
2954 /* where the last iteration ended before */
2955 if (mm_state->tail == &mm->lru_gen.list)
2956 mm_state->tail = mm_state->tail->next;
2957 }
2958
2959 list_del_init(&mm->lru_gen.list);
2960
2961 spin_unlock(&mm_list->lock);
2962
2963 #ifdef CONFIG_MEMCG
2964 mem_cgroup_put(mm->lru_gen.memcg);
2965 mm->lru_gen.memcg = NULL;
2966 #endif
2967 }
2968
2969 #ifdef CONFIG_MEMCG
2970 void lru_gen_migrate_mm(struct mm_struct *mm)
2971 {
2972 struct mem_cgroup *memcg;
2973 struct task_struct *task = rcu_dereference_protected(mm->owner, true);
2974
2975 VM_WARN_ON_ONCE(task->mm != mm);
2976 lockdep_assert_held(&task->alloc_lock);
2977
2978 /* for mm_update_next_owner() */
2979 if (mem_cgroup_disabled())
2980 return;
2981
2982 /* migration can happen before addition */
2983 if (!mm->lru_gen.memcg)
2984 return;
2985
2986 rcu_read_lock();
2987 memcg = mem_cgroup_from_task(task);
2988 rcu_read_unlock();
2989 if (memcg == mm->lru_gen.memcg)
2990 return;
2991
2992 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
2993
2994 lru_gen_del_mm(mm);
2995 lru_gen_add_mm(mm);
2996 }
2997 #endif
2998
2999 #else /* !CONFIG_LRU_GEN_WALKS_MMU */
3000
3001 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
3002 {
3003 return NULL;
3004 }
3005
3006 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
3007 {
3008 return NULL;
3009 }
3010
3011 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
3012 {
3013 return NULL;
3014 }
3015
3016 #endif
3017
3018 static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last)
3019 {
3020 int i;
3021 int hist;
3022 struct lruvec *lruvec = walk->lruvec;
3023 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
3024
3025 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
3026
3027 hist = lru_hist_from_seq(walk->seq);
3028
3029 for (i = 0; i < NR_MM_STATS; i++) {
3030 WRITE_ONCE(mm_state->stats[hist][i],
3031 mm_state->stats[hist][i] + walk->mm_stats[i]);
3032 walk->mm_stats[i] = 0;
3033 }
3034
3035 if (NR_HIST_GENS > 1 && last) {
3036 hist = lru_hist_from_seq(walk->seq + 1);
3037
3038 for (i = 0; i < NR_MM_STATS; i++)
3039 WRITE_ONCE(mm_state->stats[hist][i], 0);
3040 }
3041 }
3042
3043 static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
3044 {
3045 bool first = false;
3046 bool last = false;
3047 struct mm_struct *mm = NULL;
3048 struct lruvec *lruvec = walk->lruvec;
3049 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3050 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3051 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
3052
3053 /*
3054 * mm_state->seq is incremented after each iteration of mm_list. There
3055 * are three interesting cases for this page table walker:
3056 * 1. It tries to start a new iteration with a stale max_seq: there is
3057 * nothing left to do.
3058 * 2. It started the next iteration: it needs to reset the Bloom filter
3059 * so that a fresh set of PTE tables can be recorded.
3060 * 3. It ended the current iteration: it needs to reset the mm stats
3061 * counters and tell its caller to increment max_seq.
3062 */
3063 spin_lock(&mm_list->lock);
3064
3065 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq);
3066
3067 if (walk->seq <= mm_state->seq)
3068 goto done;
3069
3070 if (!mm_state->head)
3071 mm_state->head = &mm_list->fifo;
3072
3073 if (mm_state->head == &mm_list->fifo)
3074 first = true;
3075
3076 do {
3077 mm_state->head = mm_state->head->next;
3078 if (mm_state->head == &mm_list->fifo) {
3079 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3080 last = true;
3081 break;
3082 }
3083
3084 /* force scan for those added after the last iteration */
3085 if (!mm_state->tail || mm_state->tail == mm_state->head) {
3086 mm_state->tail = mm_state->head->next;
3087 walk->force_scan = true;
3088 }
3089 } while (!(mm = get_next_mm(walk)));
3090 done:
3091 if (*iter || last)
3092 reset_mm_stats(walk, last);
3093
3094 spin_unlock(&mm_list->lock);
3095
3096 if (mm && first)
3097 reset_bloom_filter(mm_state, walk->seq + 1);
3098
3099 if (*iter)
3100 mmput_async(*iter);
3101
3102 *iter = mm;
3103
3104 return last;
3105 }
3106
3107 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq)
3108 {
3109 bool success = false;
3110 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3111 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3112 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
3113
3114 spin_lock(&mm_list->lock);
3115
3116 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq);
3117
3118 if (seq > mm_state->seq) {
3119 mm_state->head = NULL;
3120 mm_state->tail = NULL;
3121 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
3122 success = true;
3123 }
3124
3125 spin_unlock(&mm_list->lock);
3126
3127 return success;
3128 }
3129
3130 /******************************************************************************
3131 * PID controller
3132 ******************************************************************************/
3133
3134 /*
3135 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3136 *
3137 * The P term is refaulted/(evicted+protected) from a tier in the generation
3138 * currently being evicted; the I term is the exponential moving average of the
3139 * P term over the generations previously evicted, using the smoothing factor
3140 * 1/2; the D term isn't supported.
3141 *
3142 * The setpoint (SP) is always the first tier of one type; the process variable
3143 * (PV) is either any tier of the other type or any other tier of the same
3144 * type.
3145 *
3146 * The error is the difference between the SP and the PV; the correction is to
3147 * turn off protection when SP>PV or turn on protection when SP<PV.
3148 *
3149 * For future optimizations:
3150 * 1. The D term may discount the other two terms over time so that long-lived
3151 * generations can resist stale information.
3152 */
3153 struct ctrl_pos {
3154 unsigned long refaulted;
3155 unsigned long total;
3156 int gain;
3157 };
3158
3159 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3160 struct ctrl_pos *pos)
3161 {
3162 int i;
3163 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3164 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3165
3166 pos->gain = gain;
3167 pos->refaulted = pos->total = 0;
3168
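	/*
	 * A single iteration when tier < MAX_NR_TIERS; summing over every
	 * tier when the caller passes tier == MAX_NR_TIERS.
	 */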
3169 for (i = tier % MAX_NR_TIERS; i <= min(tier, MAX_NR_TIERS - 1); i++) {
3170 pos->refaulted += lrugen->avg_refaulted[type][i] +
3171 atomic_long_read(&lrugen->refaulted[hist][type][i]);
3172 pos->total += lrugen->avg_total[type][i] +
3173 lrugen->protected[hist][type][i] +
3174 atomic_long_read(&lrugen->evicted[hist][type][i]);
3175 }
3176 }
3177
3178 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3179 {
3180 int hist, tier;
3181 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3182 bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
3183 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
3184
3185 lockdep_assert_held(&lruvec->lru_lock);
3186
3187 if (!carryover && !clear)
3188 return;
3189
3190 hist = lru_hist_from_seq(seq);
3191
3192 for (tier = 0; tier < MAX_NR_TIERS; tier++) {
3193 if (carryover) {
3194 unsigned long sum;
3195
3196 sum = lrugen->avg_refaulted[type][tier] +
3197 atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3198 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
3199
3200 sum = lrugen->avg_total[type][tier] +
3201 lrugen->protected[hist][type][tier] +
3202 atomic_long_read(&lrugen->evicted[hist][type][tier]);
3203 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
3204 }
3205
3206 if (clear) {
3207 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
3208 atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3209 WRITE_ONCE(lrugen->protected[hist][type][tier], 0);
3210 }
3211 }
3212 }
3213
3214 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
3215 {
3216 /*
3217 * Return true if the PV has a limited number of refaults or a lower
3218 * refaulted/total than the SP.
3219 */
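	/*
	 * The second clause is the division-free, cross-multiplied form of:
	 * sp->gain * pv->refaulted / pv->total <=
	 *	pv->gain * (sp->refaulted + 1) / (sp->total + MIN_LRU_BATCH).
	 */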
3220 return pv->refaulted < MIN_LRU_BATCH ||
3221 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
3222 (sp->refaulted + 1) * pv->total * pv->gain;
3223 }
3224
3225 /******************************************************************************
3226 * the aging
3227 ******************************************************************************/
3228
3229 /* promote pages accessed through page tables */
3230 static int folio_update_gen(struct folio *folio, int gen)
3231 {
3232 unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
3233
3234 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
3235
3236 /* see the comment on LRU_REFS_FLAGS */
3237 if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) {
3238 set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced));
3239 return -1;
3240 }
3241
3242 do {
3243 /* lru_gen_del_folio() has isolated this page? */
3244 if (!(old_flags & LRU_GEN_MASK))
3245 return -1;
3246
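		/*
		 * The generation is stored in folio->flags offset by one, so a
		 * zero LRU_GEN_MASK field means "not on a lru_gen list"; hence
		 * the +1 when packing below and the -1 when returning the old
		 * generation.
		 */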
3247 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
3248 new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset);
3249 } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
3250
3251 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3252 }
3253
3254 /* protect pages accessed multiple times through file descriptors */
3255 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
3256 {
3257 int type = folio_is_file_lru(folio);
3258 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3259 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3260 unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f);
3261
3262 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
3263
3264 do {
3265 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3266 /* folio_update_gen() has promoted this page? */
3267 if (new_gen >= 0 && new_gen != old_gen)
3268 return new_gen;
3269
3270 new_gen = (old_gen + 1) % MAX_NR_GENS;
3271
3272 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
3273 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
3274 /* for folio_end_writeback() */
3275 if (reclaiming)
3276 new_flags |= BIT(PG_reclaim);
3277 } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
3278
3279 lru_gen_update_size(lruvec, folio, old_gen, new_gen);
3280
3281 return new_gen;
3282 }
3283
3284 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
3285 int old_gen, int new_gen)
3286 {
3287 int type = folio_is_file_lru(folio);
3288 int zone = folio_zonenum(folio);
3289 int delta = folio_nr_pages(folio);
3290
3291 VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
3292 VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
3293
3294 walk->batched++;
3295
3296 walk->nr_pages[old_gen][type][zone] -= delta;
3297 walk->nr_pages[new_gen][type][zone] += delta;
3298 }
3299
3300 static void reset_batch_size(struct lru_gen_mm_walk *walk)
3301 {
3302 int gen, type, zone;
3303 struct lruvec *lruvec = walk->lruvec;
3304 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3305
3306 walk->batched = 0;
3307
3308 for_each_gen_type_zone(gen, type, zone) {
3309 enum lru_list lru = type * LRU_INACTIVE_FILE;
3310 int delta = walk->nr_pages[gen][type][zone];
3311
3312 if (!delta)
3313 continue;
3314
3315 walk->nr_pages[gen][type][zone] = 0;
3316 WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
3317 lrugen->nr_pages[gen][type][zone] + delta);
3318
3319 if (lru_gen_is_active(lruvec, gen))
3320 lru += LRU_ACTIVE;
3321 __update_lru_size(lruvec, lru, zone, delta);
3322 }
3323 }
3324
3325 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
3326 {
3327 struct address_space *mapping;
3328 struct vm_area_struct *vma = args->vma;
3329 struct lru_gen_mm_walk *walk = args->private;
3330
3331 if (!vma_is_accessible(vma))
3332 return true;
3333
3334 if (is_vm_hugetlb_page(vma))
3335 return true;
3336
3337 if (!vma_has_recency(vma))
3338 return true;
3339
3340 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL))
3341 return true;
3342
3343 if (vma == get_gate_vma(vma->vm_mm))
3344 return true;
3345
3346 if (vma_is_anonymous(vma))
3347 return !walk->swappiness;
3348
3349 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
3350 return true;
3351
3352 mapping = vma->vm_file->f_mapping;
3353 if (mapping_unevictable(mapping))
3354 return true;
3355
3356 if (shmem_mapping(mapping))
3357 return !walk->swappiness;
3358
3359 if (walk->swappiness > MAX_SWAPPINESS)
3360 return true;
3361
3362 /* to exclude special mappings such as DAX */
3363 return !mapping->a_ops->read_folio;
3364 }
3365
3366 /*
3367 * Some userspace memory allocators map many single-page VMAs. Instead of
3368 * returning to the PGD table for each such VMA, finish an entire PMD table
3369 * to reduce zigzags and improve cache performance.
3370 */
3371 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
3372 unsigned long *vm_start, unsigned long *vm_end)
3373 {
3374 unsigned long start = round_up(*vm_end, size);
3375 unsigned long end = (start | ~mask) + 1;
3376 VMA_ITERATOR(vmi, args->mm, start);
3377
3378 VM_WARN_ON_ONCE(mask & size);
3379 VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
3380
3381 for_each_vma(vmi, args->vma) {
3382 if (end && end <= args->vma->vm_start)
3383 return false;
3384
3385 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args))
3386 continue;
3387
3388 *vm_start = max(start, args->vma->vm_start);
3389 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
3390
3391 return true;
3392 }
3393
3394 return false;
3395 }
3396
3397 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr,
3398 struct pglist_data *pgdat)
3399 {
3400 unsigned long pfn = pte_pfn(pte);
3401
3402 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3403
3404 if (!pte_present(pte) || is_zero_pfn(pfn))
3405 return -1;
3406
3407 if (WARN_ON_ONCE(pte_special(pte)))
3408 return -1;
3409
3410 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm))
3411 return -1;
3412
3413 if (WARN_ON_ONCE(!pfn_valid(pfn)))
3414 return -1;
3415
3416 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3417 return -1;
3418
3419 return pfn;
3420 }
3421
3422 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr,
3423 struct pglist_data *pgdat)
3424 {
3425 unsigned long pfn = pmd_pfn(pmd);
3426
3427 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3428
3429 if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
3430 return -1;
3431
3432 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm))
3433 return -1;
3434
3435 if (WARN_ON_ONCE(!pfn_valid(pfn)))
3436 return -1;
3437
3438 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3439 return -1;
3440
3441 return pfn;
3442 }
3443
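/*
 * Look up the folio backing @pfn and check that it is relevant to the
 * current walk: it must carry a valid generation number and belong to
 * both the target node and the target memcg; otherwise return NULL.
 */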
3444 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
3445 struct pglist_data *pgdat)
3446 {
3447 struct folio *folio = pfn_folio(pfn);
3448
3449 if (folio_lru_gen(folio) < 0)
3450 return NULL;
3451
3452 if (folio_nid(folio) != pgdat->node_id)
3453 return NULL;
3454
3455 if (folio_memcg(folio) != memcg)
3456 return NULL;
3457
3458 return folio;
3459 }
3460
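/*
 * Illustrative example (cache line and PTE sizes are architecture
 * dependent): with 64-byte cache lines and 8-byte PTEs, n below is
 * clamp(64 / 8, 2, 8) = 8, so a scan counts as suitable when at least
 * one in every eight PTEs checked was young, i.e. roughly one young PTE
 * per cache line worth of PTEs on average.
 */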
3461 static bool suitable_to_scan(int total, int young)
3462 {
3463 int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
3464
3465 /* suitable if the average number of young PTEs per cacheline is >=1 */
3466 return young * n >= total;
3467 }
3468
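/*
 * Fold the accessed-bit information collected for @folio back into its
 * state: transfer dirtiness from the page table when appropriate, and
 * either batch the generation update (page table walk path) or set the
 * reference flags and activate the folio (rmap path, when no walk is
 * available).
 */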
3469 static void walk_update_folio(struct lru_gen_mm_walk *walk, struct folio *folio,
3470 int new_gen, bool dirty)
3471 {
3472 int old_gen;
3473
3474 if (!folio)
3475 return;
3476
3477 if (dirty && !folio_test_dirty(folio) &&
3478 !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
3479 !folio_test_swapcache(folio)))
3480 folio_mark_dirty(folio);
3481
3482 if (walk) {
3483 old_gen = folio_update_gen(folio, new_gen);
3484 if (old_gen >= 0 && old_gen != new_gen)
3485 update_batch_size(walk, folio, old_gen, new_gen);
3486 } else if (lru_gen_set_refs(folio)) {
3487 old_gen = folio_lru_gen(folio);
3488 if (old_gen >= 0 && old_gen != new_gen)
3489 folio_activate(folio);
3490 }
3491 }
3492
3493 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
3494 struct mm_walk *args)
3495 {
3496 int i;
3497 bool dirty;
3498 pte_t *pte;
3499 spinlock_t *ptl;
3500 unsigned long addr;
3501 int total = 0;
3502 int young = 0;
3503 struct folio *last = NULL;
3504 struct lru_gen_mm_walk *walk = args->private;
3505 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3506 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3507 DEFINE_MAX_SEQ(walk->lruvec);
3508 int gen = lru_gen_from_seq(max_seq);
3509 pmd_t pmdval;
3510
3511 pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl);
3512 if (!pte)
3513 return false;
3514
3515 if (!spin_trylock(ptl)) {
3516 pte_unmap(pte);
3517 return true;
3518 }
3519
3520 if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
3521 pte_unmap_unlock(pte, ptl);
3522 return false;
3523 }
3524
3525 lazy_mmu_mode_enable();
3526 restart:
3527 for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
3528 unsigned long pfn;
3529 struct folio *folio;
3530 pte_t ptent = ptep_get(pte + i);
3531
3532 total++;
3533 walk->mm_stats[MM_LEAF_TOTAL]++;
3534
3535 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat);
3536 if (pfn == -1)
3537 continue;
3538
3539 folio = get_pfn_folio(pfn, memcg, pgdat);
3540 if (!folio)
3541 continue;
3542
3543 if (!ptep_clear_young_notify(args->vma, addr, pte + i))
3544 continue;
3545
3546 if (last != folio) {
3547 walk_update_folio(walk, last, gen, dirty);
3548
3549 last = folio;
3550 dirty = false;
3551 }
3552
3553 if (pte_dirty(ptent))
3554 dirty = true;
3555
3556 young++;
3557 walk->mm_stats[MM_LEAF_YOUNG]++;
3558 }
3559
3560 walk_update_folio(walk, last, gen, dirty);
3561 last = NULL;
3562
3563 if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
3564 goto restart;
3565
3566 lazy_mmu_mode_disable();
3567 pte_unmap_unlock(pte, ptl);
3568
3569 return suitable_to_scan(total, young);
3570 }
3571
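/*
 * Clear the accessed bit in a batch of PMD entries under the PMD lock.
 * Callers accumulate entries via @bitmap, with @first recording the
 * address of the first one; a call with addr == -1 flushes whatever has
 * been batched so far.
 */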
3572 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
3573 struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
3574 {
3575 int i;
3576 bool dirty;
3577 pmd_t *pmd;
3578 spinlock_t *ptl;
3579 struct folio *last = NULL;
3580 struct lru_gen_mm_walk *walk = args->private;
3581 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3582 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3583 DEFINE_MAX_SEQ(walk->lruvec);
3584 int gen = lru_gen_from_seq(max_seq);
3585
3586 VM_WARN_ON_ONCE(pud_leaf(*pud));
3587
3588 /* try to batch at most 1+MIN_LRU_BATCH+1 entries */
3589 if (*first == -1) {
3590 *first = addr;
3591 bitmap_zero(bitmap, MIN_LRU_BATCH);
3592 return;
3593 }
3594
3595 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first);
3596 if (i && i <= MIN_LRU_BATCH) {
3597 __set_bit(i - 1, bitmap);
3598 return;
3599 }
3600
3601 pmd = pmd_offset(pud, *first);
3602
3603 ptl = pmd_lockptr(args->mm, pmd);
3604 if (!spin_trylock(ptl))
3605 goto done;
3606
3607 lazy_mmu_mode_enable();
3608
3609 do {
3610 unsigned long pfn;
3611 struct folio *folio;
3612
3613 /* don't round down the first address */
3614 addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
3615
3616 if (!pmd_present(pmd[i]))
3617 goto next;
3618
3619 if (!pmd_trans_huge(pmd[i])) {
3620 if (!walk->force_scan && should_clear_pmd_young() &&
3621 !mm_has_notifiers(args->mm))
3622 pmdp_test_and_clear_young(vma, addr, pmd + i);
3623 goto next;
3624 }
3625
3626 pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat);
3627 if (pfn == -1)
3628 goto next;
3629
3630 folio = get_pfn_folio(pfn, memcg, pgdat);
3631 if (!folio)
3632 goto next;
3633
3634 if (!pmdp_clear_young_notify(vma, addr, pmd + i))
3635 goto next;
3636
3637 if (last != folio) {
3638 walk_update_folio(walk, last, gen, dirty);
3639
3640 last = folio;
3641 dirty = false;
3642 }
3643
3644 if (pmd_dirty(pmd[i]))
3645 dirty = true;
3646
3647 walk->mm_stats[MM_LEAF_YOUNG]++;
3648 next:
3649 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
3650 } while (i <= MIN_LRU_BATCH);
3651
3652 walk_update_folio(walk, last, gen, dirty);
3653
3654 lazy_mmu_mode_disable();
3655 spin_unlock(ptl);
3656 done:
3657 *first = -1;
3658 }
3659
3660 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
3661 struct mm_walk *args)
3662 {
3663 int i;
3664 pmd_t *pmd;
3665 unsigned long next;
3666 unsigned long addr;
3667 struct vm_area_struct *vma;
3668 DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
3669 unsigned long first = -1;
3670 struct lru_gen_mm_walk *walk = args->private;
3671 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
3672
3673 VM_WARN_ON_ONCE(pud_leaf(*pud));
3674
3675 /*
3676 * Finish an entire PMD in two passes: the first only reaches to PTE
3677 * tables to avoid taking the PMD lock; the second, if necessary, takes
3678 * the PMD lock to clear the accessed bit in PMD entries.
3679 */
3680 pmd = pmd_offset(pud, start & PUD_MASK);
3681 restart:
3682 /* walk_pte_range() may call get_next_vma() */
3683 vma = args->vma;
3684 for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
3685 pmd_t val = pmdp_get_lockless(pmd + i);
3686
3687 next = pmd_addr_end(addr, end);
3688
3689 if (!pmd_present(val) || is_huge_zero_pmd(val)) {
3690 walk->mm_stats[MM_LEAF_TOTAL]++;
3691 continue;
3692 }
3693
3694 if (pmd_trans_huge(val)) {
3695 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3696 unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat);
3697
3698 walk->mm_stats[MM_LEAF_TOTAL]++;
3699
3700 if (pfn != -1)
3701 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
3702 continue;
3703 }
3704
3705 if (!walk->force_scan && should_clear_pmd_young() &&
3706 !mm_has_notifiers(args->mm)) {
3707 if (!pmd_young(val))
3708 continue;
3709
3710 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
3711 }
3712
3713 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i))
3714 continue;
3715
3716 walk->mm_stats[MM_NONLEAF_FOUND]++;
3717
3718 if (!walk_pte_range(&val, addr, next, args))
3719 continue;
3720
3721 walk->mm_stats[MM_NONLEAF_ADDED]++;
3722
3723 /* carry over to the next generation */
3724 update_bloom_filter(mm_state, walk->seq + 1, pmd + i);
3725 }
3726
3727 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);
3728
3729 if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
3730 goto restart;
3731 }
3732
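/*
 * Walk the PUD entries under one P4D entry, handing each present PUD to
 * walk_pmd_range(). Returns -EAGAIN with walk->next_addr set when the
 * walk should be resumed later (to reschedule or to flush a full batch),
 * or a positive value to stop the page table walk.
 */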
3733 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
3734 struct mm_walk *args)
3735 {
3736 int i;
3737 pud_t *pud;
3738 unsigned long addr;
3739 unsigned long next;
3740 struct lru_gen_mm_walk *walk = args->private;
3741
3742 VM_WARN_ON_ONCE(p4d_leaf(*p4d));
3743
3744 pud = pud_offset(p4d, start & P4D_MASK);
3745 restart:
3746 for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
3747 pud_t val = pudp_get(pud + i);
3748
3749 next = pud_addr_end(addr, end);
3750
3751 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
3752 continue;
3753
3754 walk_pmd_range(&val, addr, next, args);
3755
3756 if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
3757 end = (addr | ~PUD_MASK) + 1;
3758 goto done;
3759 }
3760 }
3761
3762 if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
3763 goto restart;
3764
3765 end = round_up(end, P4D_SIZE);
3766 done:
3767 if (!end || !args->vma)
3768 return 1;
3769
3770 walk->next_addr = max(end, args->vma->vm_start);
3771
3772 return -EAGAIN;
3773 }
3774
3775 static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3776 {
3777 static const struct mm_walk_ops mm_walk_ops = {
3778 .test_walk = should_skip_vma,
3779 .p4d_entry = walk_pud_range,
3780 .walk_lock = PGWALK_RDLOCK,
3781 };
3782 int err;
3783 struct lruvec *lruvec = walk->lruvec;
3784
3785 walk->next_addr = FIRST_USER_ADDRESS;
3786
3787 do {
3788 DEFINE_MAX_SEQ(lruvec);
3789
3790 err = -EBUSY;
3791
3792 /* another thread might have called inc_max_seq() */
3793 if (walk->seq != max_seq)
3794 break;
3795
3796 /* the caller might be holding the lock for write */
3797 if (mmap_read_trylock(mm)) {
3798 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
3799
3800 mmap_read_unlock(mm);
3801 }
3802
3803 if (walk->batched) {
3804 spin_lock_irq(&lruvec->lru_lock);
3805 reset_batch_size(walk);
3806 spin_unlock_irq(&lruvec->lru_lock);
3807 }
3808
3809 cond_resched();
3810 } while (err == -EAGAIN);
3811 }
3812
3813 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
3814 {
3815 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3816
3817 if (pgdat && current_is_kswapd()) {
3818 VM_WARN_ON_ONCE(walk);
3819
3820 walk = &pgdat->mm_walk;
3821 } else if (!walk && force_alloc) {
3822 VM_WARN_ON_ONCE(current_is_kswapd());
3823
3824 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3825 }
3826
3827 current->reclaim_state->mm_walk = walk;
3828
3829 return walk;
3830 }
3831
3832 static void clear_mm_walk(void)
3833 {
3834 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3835
3836 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
3837 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
3838
3839 current->reclaim_state->mm_walk = NULL;
3840
3841 if (!current_is_kswapd())
3842 kfree(walk);
3843 }
3844
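/*
 * Advance min_seq[type] by one, first moving any folios left in the
 * oldest generation of an evictable type up one generation (at most
 * MAX_LRU_BATCH at a time) to prevent cold/hot inversion. Returns false
 * if the batch limit was hit before the oldest generation was emptied.
 */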
3845 static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness)
3846 {
3847 int zone;
3848 int remaining = MAX_LRU_BATCH;
3849 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3850 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3851 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3852
3853 /* For file type, skip the check if swappiness is anon only */
3854 if (type && (swappiness == SWAPPINESS_ANON_ONLY))
3855 goto done;
3856
3857 /* For anon type, skip the check if swappiness is zero (file only) */
3858 if (!type && !swappiness)
3859 goto done;
3860
3861 /* prevent cold/hot inversion if the type is evictable */
3862 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3863 struct list_head *head = &lrugen->folios[old_gen][type][zone];
3864
3865 while (!list_empty(head)) {
3866 struct folio *folio = lru_to_folio(head);
3867 int refs = folio_lru_refs(folio);
3868 bool workingset = folio_test_workingset(folio);
3869
3870 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
3871 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
3872 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
3873 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
3874
3875 new_gen = folio_inc_gen(lruvec, folio, false);
3876 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
3877
3878 /* don't count the workingset being lazily promoted */
3879 if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
3880 int tier = lru_tier_from_refs(refs, workingset);
3881 int delta = folio_nr_pages(folio);
3882
3883 WRITE_ONCE(lrugen->protected[hist][type][tier],
3884 lrugen->protected[hist][type][tier] + delta);
3885 }
3886
3887 if (!--remaining)
3888 return false;
3889 }
3890 }
3891 done:
3892 reset_ctrl_pos(lruvec, type, true);
3893 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
3894
3895 return true;
3896 }
3897
3898 static bool try_to_inc_min_seq(struct lruvec *lruvec, int swappiness)
3899 {
3900 int gen, type, zone;
3901 bool success = false;
3902 bool seq_inc_flag = false;
3903 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3904 DEFINE_MIN_SEQ(lruvec);
3905
3906 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3907
3908 /* find the oldest populated generation */
3909 for_each_evictable_type(type, swappiness) {
3910 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
3911 gen = lru_gen_from_seq(min_seq[type]);
3912
3913 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3914 if (!list_empty(&lrugen->folios[gen][type][zone]))
3915 goto next;
3916 }
3917
3918 min_seq[type]++;
3919 seq_inc_flag = true;
3920 }
3921 next:
3922 ;
3923 }
3924
3925 /*
3926 * If min_seq[type] was not advanced for either the anon or the file
3927 * type, return false right away to avoid the unnecessary checks
3928 * below.
3929 */
3930 if (!seq_inc_flag)
3931 return success;
3932
3933 /* see the comment on lru_gen_folio */
3934 if (swappiness && swappiness <= MAX_SWAPPINESS) {
3935 unsigned long seq = lrugen->max_seq - MIN_NR_GENS;
3936
3937 if (min_seq[LRU_GEN_ANON] > seq && min_seq[LRU_GEN_FILE] < seq)
3938 min_seq[LRU_GEN_ANON] = seq;
3939 else if (min_seq[LRU_GEN_FILE] > seq && min_seq[LRU_GEN_ANON] < seq)
3940 min_seq[LRU_GEN_FILE] = seq;
3941 }
3942
3943 for_each_evictable_type(type, swappiness) {
3944 if (min_seq[type] <= lrugen->min_seq[type])
3945 continue;
3946
3947 reset_ctrl_pos(lruvec, type, true);
3948 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
3949 success = true;
3950 }
3951
3952 return success;
3953 }
3954
3955 static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
3956 {
3957 bool success;
3958 int prev, next;
3959 int type, zone;
3960 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3961 restart:
3962 if (seq < READ_ONCE(lrugen->max_seq))
3963 return false;
3964
3965 spin_lock_irq(&lruvec->lru_lock);
3966
3967 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3968
3969 success = seq == lrugen->max_seq;
3970 if (!success)
3971 goto unlock;
3972
3973 for (type = 0; type < ANON_AND_FILE; type++) {
3974 if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
3975 continue;
3976
3977 if (inc_min_seq(lruvec, type, swappiness))
3978 continue;
3979
3980 spin_unlock_irq(&lruvec->lru_lock);
3981 cond_resched();
3982 goto restart;
3983 }
3984
3985 /*
3986 * Update the active/inactive LRU sizes for compatibility. Both sides of
3987 * the current max_seq need to be covered, since max_seq+1 can overlap
3988 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
3989 * overlap, cold/hot inversion happens.
3990 */
3991 prev = lru_gen_from_seq(lrugen->max_seq - 1);
3992 next = lru_gen_from_seq(lrugen->max_seq + 1);
3993
3994 for (type = 0; type < ANON_AND_FILE; type++) {
3995 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3996 enum lru_list lru = type * LRU_INACTIVE_FILE;
3997 long delta = lrugen->nr_pages[prev][type][zone] -
3998 lrugen->nr_pages[next][type][zone];
3999
4000 if (!delta)
4001 continue;
4002
4003 __update_lru_size(lruvec, lru, zone, delta);
4004 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
4005 }
4006 }
4007
4008 for (type = 0; type < ANON_AND_FILE; type++)
4009 reset_ctrl_pos(lruvec, type, false);
4010
4011 WRITE_ONCE(lrugen->timestamps[next], jiffies);
4012 /* make sure preceding modifications appear */
4013 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
4014 unlock:
4015 spin_unlock_irq(&lruvec->lru_lock);
4016
4017 return success;
4018 }
4019
4020 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq,
4021 int swappiness, bool force_scan)
4022 {
4023 bool success;
4024 struct lru_gen_mm_walk *walk;
4025 struct mm_struct *mm = NULL;
4026 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4027 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
4028
4029 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq));
4030
4031 if (!mm_state)
4032 return inc_max_seq(lruvec, seq, swappiness);
4033
4034 /* see the comment in iterate_mm_list() */
4035 if (seq <= READ_ONCE(mm_state->seq))
4036 return false;
4037
4038 /*
4039 * If the hardware doesn't automatically set the accessed bit, fall back
4040 * to lru_gen_look_around(), which only clears the accessed bit in a
4041 * handful of PTEs. Spreading the work out over a period of time is
4042 * usually less efficient, but it avoids bursty page faults.
4043 */
4044 if (!should_walk_mmu()) {
4045 success = iterate_mm_list_nowalk(lruvec, seq);
4046 goto done;
4047 }
4048
4049 walk = set_mm_walk(NULL, true);
4050 if (!walk) {
4051 success = iterate_mm_list_nowalk(lruvec, seq);
4052 goto done;
4053 }
4054
4055 walk->lruvec = lruvec;
4056 walk->seq = seq;
4057 walk->swappiness = swappiness;
4058 walk->force_scan = force_scan;
4059
4060 do {
4061 success = iterate_mm_list(walk, &mm);
4062 if (mm)
4063 walk_mm(mm, walk);
4064 } while (mm);
4065 done:
4066 if (success) {
4067 success = inc_max_seq(lruvec, seq, swappiness);
4068 WARN_ON_ONCE(!success);
4069 }
4070
4071 return success;
4072 }
4073
4074 /******************************************************************************
4075 * working set protection
4076 ******************************************************************************/
4077
4078 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
4079 {
4080 int priority;
4081 unsigned long reclaimable;
4082
4083 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
4084 return;
4085 /*
4086 * Determine the initial priority based on
4087 * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim,
4088 * where reclaimed_to_scanned_ratio = inactive / total.
4089 */
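/*
 * Worked example with illustrative numbers: for 2^20 reclaimable
 * inactive pages and sc->nr_to_reclaim == 32, fls_long(2^20) - 1 = 20
 * and fls_long(31) = 5, so priority = 15, which the clamp below then
 * caps at DEF_PRIORITY.
 */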
4090 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
4091 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
4092 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
4093
4094 /* round down reclaimable and round up sc->nr_to_reclaim */
4095 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
4096
4097 /*
4098 * The estimation is based on LRU pages only, so cap it to prevent
4099 * overshoots of shrinker objects by large margins.
4100 */
4101 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
4102 }
4103
4104 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
4105 {
4106 int gen, type, zone;
4107 unsigned long total = 0;
4108 int swappiness = get_swappiness(lruvec, sc);
4109 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4110 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4111 DEFINE_MAX_SEQ(lruvec);
4112 DEFINE_MIN_SEQ(lruvec);
4113
4114 for_each_evictable_type(type, swappiness) {
4115 unsigned long seq;
4116
4117 for (seq = min_seq[type]; seq <= max_seq; seq++) {
4118 gen = lru_gen_from_seq(seq);
4119
4120 for (zone = 0; zone < MAX_NR_ZONES; zone++)
4121 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
4122 }
4123 }
4124
4125 /* whether the size is big enough to be helpful */
4126 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
4127 }
4128
4129 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc,
4130 unsigned long min_ttl)
4131 {
4132 int gen;
4133 unsigned long birth;
4134 int swappiness = get_swappiness(lruvec, sc);
4135 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4136 DEFINE_MIN_SEQ(lruvec);
4137
4138 if (mem_cgroup_below_min(NULL, memcg))
4139 return false;
4140
4141 if (!lruvec_is_sizable(lruvec, sc))
4142 return false;
4143
4144 gen = lru_gen_from_seq(evictable_min_seq(min_seq, swappiness));
4145 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
4146
4147 return time_is_before_jiffies(birth + min_ttl);
4148 }
4149
4150 /* to protect the working set of the last N jiffies */
4151 static unsigned long lru_gen_min_ttl __read_mostly;
4152
4153 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
4154 {
4155 struct mem_cgroup *memcg;
4156 unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
4157 bool reclaimable = !min_ttl;
4158
4159 VM_WARN_ON_ONCE(!current_is_kswapd());
4160
4161 set_initial_priority(pgdat, sc);
4162
4163 memcg = mem_cgroup_iter(NULL, NULL, NULL);
4164 do {
4165 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4166
4167 mem_cgroup_calculate_protection(NULL, memcg);
4168
4169 if (!reclaimable)
4170 reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl);
4171 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4172
4173 /*
4174 * The main goal is to OOM kill if every generation from all memcgs is
4175 * younger than min_ttl. However, another possibility is that all memcgs
4176 * are either too small or below min.
4177 */
4178 if (!reclaimable && mutex_trylock(&oom_lock)) {
4179 struct oom_control oc = {
4180 .gfp_mask = sc->gfp_mask,
4181 };
4182
4183 out_of_memory(&oc);
4184
4185 mutex_unlock(&oom_lock);
4186 }
4187 }
4188
4189 /******************************************************************************
4190 * rmap/PT walk feedback
4191 ******************************************************************************/
4192
4193 /*
4194 * This function exploits spatial locality when shrink_folio_list() walks the
4195 * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
4196 * the scan was done cacheline efficiently, it adds the PMD entry pointing to
4197 * the PTE table to the Bloom filter. This forms a feedback loop between the
4198 * eviction and the aging.
4199 */
4200 bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
4201 {
4202 int i;
4203 bool dirty;
4204 unsigned long start;
4205 unsigned long end;
4206 struct lru_gen_mm_walk *walk;
4207 struct folio *last = NULL;
4208 int young = 1;
4209 pte_t *pte = pvmw->pte;
4210 unsigned long addr = pvmw->address;
4211 struct vm_area_struct *vma = pvmw->vma;
4212 struct folio *folio = pfn_folio(pvmw->pfn);
4213 struct mem_cgroup *memcg = folio_memcg(folio);
4214 struct pglist_data *pgdat = folio_pgdat(folio);
4215 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4216 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
4217 DEFINE_MAX_SEQ(lruvec);
4218 int gen = lru_gen_from_seq(max_seq);
4219
4220 lockdep_assert_held(pvmw->ptl);
4221 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
4222
4223 if (!ptep_clear_young_notify(vma, addr, pte))
4224 return false;
4225
4226 if (spin_is_contended(pvmw->ptl))
4227 return true;
4228
4229 /* exclude special VMAs containing anon pages from COW */
4230 if (vma->vm_flags & VM_SPECIAL)
4231 return true;
4232
4233 /* avoid taking the LRU lock under the PTL when possible */
4234 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
4235
4236 start = max(addr & PMD_MASK, vma->vm_start);
4237 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;
4238
4239 if (end - start == PAGE_SIZE)
4240 return true;
4241
4242 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
4243 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
4244 end = start + MIN_LRU_BATCH * PAGE_SIZE;
4245 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
4246 start = end - MIN_LRU_BATCH * PAGE_SIZE;
4247 else {
4248 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
4249 end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
4250 }
4251 }
4252
4253 lazy_mmu_mode_enable();
4254
4255 pte -= (addr - start) / PAGE_SIZE;
4256
4257 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
4258 unsigned long pfn;
4259 pte_t ptent = ptep_get(pte + i);
4260
4261 pfn = get_pte_pfn(ptent, vma, addr, pgdat);
4262 if (pfn == -1)
4263 continue;
4264
4265 folio = get_pfn_folio(pfn, memcg, pgdat);
4266 if (!folio)
4267 continue;
4268
4269 if (!ptep_clear_young_notify(vma, addr, pte + i))
4270 continue;
4271
4272 if (last != folio) {
4273 walk_update_folio(walk, last, gen, dirty);
4274
4275 last = folio;
4276 dirty = false;
4277 }
4278
4279 if (pte_dirty(ptent))
4280 dirty = true;
4281
4282 young++;
4283 }
4284
4285 walk_update_folio(walk, last, gen, dirty);
4286
4287 lazy_mmu_mode_disable();
4288
4289 /* feedback from rmap walkers to page table walkers */
4290 if (mm_state && suitable_to_scan(i, young))
4291 update_bloom_filter(mm_state, max_seq, pvmw->pmd);
4292
4293 return true;
4294 }
4295
4296 /******************************************************************************
4297 * memcg LRU
4298 ******************************************************************************/
4299
4300 /* see the comment on MEMCG_NR_GENS */
4301 enum {
4302 MEMCG_LRU_NOP,
4303 MEMCG_LRU_HEAD,
4304 MEMCG_LRU_TAIL,
4305 MEMCG_LRU_OLD,
4306 MEMCG_LRU_YOUNG,
4307 };
4308
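/*
 * Re-link a memcg's lruvec on the per-node memcg LRU according to @op:
 * to the head or tail of its current generation, or into the old or
 * young generation. Also advances memcg_lru.seq once the old generation
 * runs empty.
 */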
4309 static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
4310 {
4311 int seg;
4312 int old, new;
4313 unsigned long flags;
4314 int bin = get_random_u32_below(MEMCG_NR_BINS);
4315 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4316
4317 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
4318
4319 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
4320
4321 seg = 0;
4322 new = old = lruvec->lrugen.gen;
4323
4324 /* see the comment on MEMCG_NR_GENS */
4325 if (op == MEMCG_LRU_HEAD)
4326 seg = MEMCG_LRU_HEAD;
4327 else if (op == MEMCG_LRU_TAIL)
4328 seg = MEMCG_LRU_TAIL;
4329 else if (op == MEMCG_LRU_OLD)
4330 new = get_memcg_gen(pgdat->memcg_lru.seq);
4331 else if (op == MEMCG_LRU_YOUNG)
4332 new = get_memcg_gen(pgdat->memcg_lru.seq + 1);
4333 else
4334 VM_WARN_ON_ONCE(true);
4335
4336 WRITE_ONCE(lruvec->lrugen.seg, seg);
4337 WRITE_ONCE(lruvec->lrugen.gen, new);
4338
4339 hlist_nulls_del_rcu(&lruvec->lrugen.list);
4340
4341 if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
4342 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4343 else
4344 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4345
4346 pgdat->memcg_lru.nr_memcgs[old]--;
4347 pgdat->memcg_lru.nr_memcgs[new]++;
4348
4349 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
4350 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4351
4352 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
4353 }
4354
4355 #ifdef CONFIG_MEMCG
4356
4357 void lru_gen_online_memcg(struct mem_cgroup *memcg)
4358 {
4359 int gen;
4360 int nid;
4361 int bin = get_random_u32_below(MEMCG_NR_BINS);
4362
4363 for_each_node(nid) {
4364 struct pglist_data *pgdat = NODE_DATA(nid);
4365 struct lruvec *lruvec = get_lruvec(memcg, nid);
4366
4367 spin_lock_irq(&pgdat->memcg_lru.lock);
4368
4369 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
4370
4371 gen = get_memcg_gen(pgdat->memcg_lru.seq);
4372
4373 lruvec->lrugen.gen = gen;
4374
4375 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
4376 pgdat->memcg_lru.nr_memcgs[gen]++;
4377
4378 spin_unlock_irq(&pgdat->memcg_lru.lock);
4379 }
4380 }
4381
4382 void lru_gen_offline_memcg(struct mem_cgroup *memcg)
4383 {
4384 int nid;
4385
4386 for_each_node(nid) {
4387 struct lruvec *lruvec = get_lruvec(memcg, nid);
4388
4389 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD);
4390 }
4391 }
4392
4393 void lru_gen_release_memcg(struct mem_cgroup *memcg)
4394 {
4395 int gen;
4396 int nid;
4397
4398 for_each_node(nid) {
4399 struct pglist_data *pgdat = NODE_DATA(nid);
4400 struct lruvec *lruvec = get_lruvec(memcg, nid);
4401
4402 spin_lock_irq(&pgdat->memcg_lru.lock);
4403
4404 if (hlist_nulls_unhashed(&lruvec->lrugen.list))
4405 goto unlock;
4406
4407 gen = lruvec->lrugen.gen;
4408
4409 hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
4410 pgdat->memcg_lru.nr_memcgs[gen]--;
4411
4412 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
4413 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4414 unlock:
4415 spin_unlock_irq(&pgdat->memcg_lru.lock);
4416 }
4417 }
4418
4419 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
4420 {
4421 struct lruvec *lruvec = get_lruvec(memcg, nid);
4422
4423 /* see the comment on MEMCG_NR_GENS */
4424 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD)
4425 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
4426 }
4427
4428 #endif /* CONFIG_MEMCG */
4429
4430 /******************************************************************************
4431 * the eviction
4432 ******************************************************************************/
4433
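/*
 * Decide what to do with a folio from the oldest generation without
 * isolating it: cull it if it is unevictable, or keep it on the MGLRU
 * lists if it was already promoted, is protected by its tier, sits in a
 * zone above sc->reclaim_idx, or is waiting for writeback. Returns false
 * when the folio should be isolated for eviction instead.
 */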
4434 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
4435 int tier_idx)
4436 {
4437 bool success;
4438 bool dirty, writeback;
4439 int gen = folio_lru_gen(folio);
4440 int type = folio_is_file_lru(folio);
4441 int zone = folio_zonenum(folio);
4442 int delta = folio_nr_pages(folio);
4443 int refs = folio_lru_refs(folio);
4444 bool workingset = folio_test_workingset(folio);
4445 int tier = lru_tier_from_refs(refs, workingset);
4446 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4447
4448 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
4449
4450 /* unevictable */
4451 if (!folio_evictable(folio)) {
4452 success = lru_gen_del_folio(lruvec, folio, true);
4453 VM_WARN_ON_ONCE_FOLIO(!success, folio);
4454 folio_set_unevictable(folio);
4455 lruvec_add_folio(lruvec, folio);
4456 __count_vm_events(UNEVICTABLE_PGCULLED, delta);
4457 return true;
4458 }
4459
4460 /* promoted */
4461 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
4462 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4463 return true;
4464 }
4465
4466 /* protected */
4467 if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) {
4468 gen = folio_inc_gen(lruvec, folio, false);
4469 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4470
4471 /* don't count the workingset being lazily promoted */
4472 if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) {
4473 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
4474
4475 WRITE_ONCE(lrugen->protected[hist][type][tier],
4476 lrugen->protected[hist][type][tier] + delta);
4477 }
4478 return true;
4479 }
4480
4481 /* ineligible */
4482 if (zone > sc->reclaim_idx) {
4483 gen = folio_inc_gen(lruvec, folio, false);
4484 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
4485 return true;
4486 }
4487
4488 dirty = folio_test_dirty(folio);
4489 writeback = folio_test_writeback(folio);
4490 if (type == LRU_GEN_FILE && dirty) {
4491 sc->nr.file_taken += delta;
4492 if (!writeback)
4493 sc->nr.unqueued_dirty += delta;
4494 }
4495
4496 /* waiting for writeback */
4497 if (writeback || (type == LRU_GEN_FILE && dirty)) {
4498 gen = folio_inc_gen(lruvec, folio, true);
4499 list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
4500 return true;
4501 }
4502
4503 return false;
4504 }
4505
4506 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc)
4507 {
4508 bool success;
4509
4510 /* swap constrained */
4511 if (!(sc->gfp_mask & __GFP_IO) &&
4512 (folio_test_dirty(folio) ||
4513 (folio_test_anon(folio) && !folio_test_swapcache(folio))))
4514 return false;
4515
4516 /* raced with release_pages() */
4517 if (!folio_try_get(folio))
4518 return false;
4519
4520 /* raced with another isolation */
4521 if (!folio_test_clear_lru(folio)) {
4522 folio_put(folio);
4523 return false;
4524 }
4525
4526 /* see the comment on LRU_REFS_FLAGS */
4527 if (!folio_test_referenced(folio))
4528 set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
4529
4530 /* for shrink_folio_list() */
4531 folio_clear_reclaim(folio);
4532
4533 success = lru_gen_del_folio(lruvec, folio, true);
4534 VM_WARN_ON_ONCE_FOLIO(!success, folio);
4535
4536 return true;
4537 }
4538
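/*
 * Scan folios in the oldest generation of @type, starting from the
 * highest eligible zone: folios kept by sort_folio() are counted as
 * sorted, the rest are isolated onto @list. Returns the number of folios
 * scanned if anything was isolated or the scan batch was used up, and 0
 * otherwise.
 */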
4539 static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
4540 struct scan_control *sc, int type, int tier,
4541 struct list_head *list)
4542 {
4543 int i;
4544 int gen;
4545 enum vm_event_item item;
4546 int sorted = 0;
4547 int scanned = 0;
4548 int isolated = 0;
4549 int skipped = 0;
4550 int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
4551 int remaining = scan_batch;
4552 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4553 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4554
4555 VM_WARN_ON_ONCE(!list_empty(list));
4556
4557 if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
4558 return 0;
4559
4560 gen = lru_gen_from_seq(lrugen->min_seq[type]);
4561
4562 for (i = MAX_NR_ZONES; i > 0; i--) {
4563 LIST_HEAD(moved);
4564 int skipped_zone = 0;
4565 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
4566 struct list_head *head = &lrugen->folios[gen][type][zone];
4567
4568 while (!list_empty(head)) {
4569 struct folio *folio = lru_to_folio(head);
4570 int delta = folio_nr_pages(folio);
4571
4572 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
4573 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
4574 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
4575 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
4576
4577 scanned += delta;
4578
4579 if (sort_folio(lruvec, folio, sc, tier))
4580 sorted += delta;
4581 else if (isolate_folio(lruvec, folio, sc)) {
4582 list_add(&folio->lru, list);
4583 isolated += delta;
4584 } else {
4585 list_move(&folio->lru, &moved);
4586 skipped_zone += delta;
4587 }
4588
4589 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH)
4590 break;
4591 }
4592
4593 if (skipped_zone) {
4594 list_splice(&moved, head);
4595 __count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone);
4596 skipped += skipped_zone;
4597 }
4598
4599 if (!remaining || isolated >= MIN_LRU_BATCH)
4600 break;
4601 }
4602
4603 item = PGSCAN_KSWAPD + reclaimer_offset(sc);
4604 if (!cgroup_reclaim(sc)) {
4605 __count_vm_events(item, isolated);
4606 __count_vm_events(PGREFILL, sorted);
4607 }
4608 count_memcg_events(memcg, item, isolated);
4609 count_memcg_events(memcg, PGREFILL, sorted);
4610 __count_vm_events(PGSCAN_ANON + type, isolated);
4611 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch,
4612 scanned, skipped, isolated,
4613 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
4614 if (type == LRU_GEN_FILE)
4615 sc->nr.file_taken += isolated;
4616 /*
4617 * There might not be eligible folios due to reclaim_idx. Check
4618 * remaining to prevent a livelock if the scan is not making progress.
4619 */
4620 return isolated || !remaining ? scanned : 0;
4621 }
4622
4623 static int get_tier_idx(struct lruvec *lruvec, int type)
4624 {
4625 int tier;
4626 struct ctrl_pos sp, pv;
4627
4628 /*
4629 * To leave a margin for fluctuations, use a larger gain factor (2:3).
4630 * This value is chosen because any other tier would have at least twice
4631 * as many refaults as the first tier.
4632 */
4633 read_ctrl_pos(lruvec, type, 0, 2, &sp);
4634 for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4635 read_ctrl_pos(lruvec, type, tier, 3, &pv);
4636 if (!positive_ctrl_err(&sp, &pv))
4637 break;
4638 }
4639
4640 return tier - 1;
4641 }
4642
4643 static int get_type_to_scan(struct lruvec *lruvec, int swappiness)
4644 {
4645 struct ctrl_pos sp, pv;
4646
4647 if (swappiness <= MIN_SWAPPINESS + 1)
4648 return LRU_GEN_FILE;
4649
4650 if (swappiness >= MAX_SWAPPINESS)
4651 return LRU_GEN_ANON;
4652 /*
4653 * Compare the sum of all tiers of anon with that of file to determine
4654 * which type to scan.
4655 */
4656 read_ctrl_pos(lruvec, LRU_GEN_ANON, MAX_NR_TIERS, swappiness, &sp);
4657 read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv);
4658
4659 return positive_ctrl_err(&sp, &pv);
4660 }
4661
4662 static int isolate_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
4663 struct scan_control *sc, int swappiness,
4664 int *type_scanned, struct list_head *list)
4665 {
4666 int i;
4667 int type = get_type_to_scan(lruvec, swappiness);
4668
4669 for_each_evictable_type(i, swappiness) {
4670 int scanned;
4671 int tier = get_tier_idx(lruvec, type);
4672
4673 *type_scanned = type;
4674
4675 scanned = scan_folios(nr_to_scan, lruvec, sc, type, tier, list);
4676 if (scanned)
4677 return scanned;
4678
4679 type = !type;
4680 }
4681
4682 return 0;
4683 }
4684
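/*
 * The main eviction path: isolate a batch of folios from the oldest
 * generation, hand them to shrink_folio_list(), and put back whatever
 * could not be reclaimed. Clean unmapped folios that may simply have
 * missed folio_rotate_reclaimable() get one retry pass. Returns the
 * number of folios scanned.
 */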
4685 static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
4686 struct scan_control *sc, int swappiness)
4687 {
4688 int type;
4689 int scanned;
4690 int reclaimed;
4691 LIST_HEAD(list);
4692 LIST_HEAD(clean);
4693 struct folio *folio;
4694 struct folio *next;
4695 enum vm_event_item item;
4696 struct reclaim_stat stat;
4697 struct lru_gen_mm_walk *walk;
4698 bool skip_retry = false;
4699 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4700 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4701 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4702
4703 spin_lock_irq(&lruvec->lru_lock);
4704
4705 scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness, &type, &list);
4706
4707 scanned += try_to_inc_min_seq(lruvec, swappiness);
4708
4709 if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq)
4710 scanned = 0;
4711
4712 spin_unlock_irq(&lruvec->lru_lock);
4713
4714 if (list_empty(&list))
4715 return scanned;
4716 retry:
4717 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
4718 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
4719 sc->nr_reclaimed += reclaimed;
4720 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
4721 scanned, reclaimed, &stat, sc->priority,
4722 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
4723
4724 list_for_each_entry_safe_reverse(folio, next, &list, lru) {
4725 DEFINE_MIN_SEQ(lruvec);
4726
4727 if (!folio_evictable(folio)) {
4728 list_del(&folio->lru);
4729 folio_putback_lru(folio);
4730 continue;
4731 }
4732
4733 /* retry folios that may have missed folio_rotate_reclaimable() */
4734 if (!skip_retry && !folio_test_active(folio) && !folio_mapped(folio) &&
4735 !folio_test_dirty(folio) && !folio_test_writeback(folio)) {
4736 list_move(&folio->lru, &clean);
4737 continue;
4738 }
4739
4740 /* don't add rejected folios to the oldest generation */
4741 if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type])
4742 set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active));
4743 }
4744
4745 spin_lock_irq(&lruvec->lru_lock);
4746
4747 move_folios_to_lru(lruvec, &list);
4748
4749 walk = current->reclaim_state->mm_walk;
4750 if (walk && walk->batched) {
4751 walk->lruvec = lruvec;
4752 reset_batch_size(walk);
4753 }
4754
4755 mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
4756 stat.nr_demoted);
4757
4758 item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
4759 if (!cgroup_reclaim(sc))
4760 __count_vm_events(item, reclaimed);
4761 count_memcg_events(memcg, item, reclaimed);
4762 __count_vm_events(PGSTEAL_ANON + type, reclaimed);
4763
4764 spin_unlock_irq(&lruvec->lru_lock);
4765
4766 list_splice_init(&clean, &list);
4767
4768 if (!list_empty(&list)) {
4769 skip_retry = true;
4770 goto retry;
4771 }
4772
4773 return scanned;
4774 }
4775
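/*
 * Estimate how many folios are left to scan and decide whether the aging
 * should run: it must run when eviction is no longer possible, and it is
 * preferred when the eviction is about to exhaust the remaining
 * generations.
 */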
4776 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
4777 int swappiness, unsigned long *nr_to_scan)
4778 {
4779 int gen, type, zone;
4780 unsigned long size = 0;
4781 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4782 DEFINE_MIN_SEQ(lruvec);
4783
4784 *nr_to_scan = 0;
4785 /* have to run aging, since eviction is not possible anymore */
4786 if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
4787 return true;
4788
4789 for_each_evictable_type(type, swappiness) {
4790 unsigned long seq;
4791
4792 for (seq = min_seq[type]; seq <= max_seq; seq++) {
4793 gen = lru_gen_from_seq(seq);
4794
4795 for (zone = 0; zone < MAX_NR_ZONES; zone++)
4796 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
4797 }
4798 }
4799
4800 *nr_to_scan = size;
4801 /* better to run aging even though eviction is still possible */
4802 return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
4803 }
4804
4805 /*
4806 * For future optimizations:
4807 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
4808 * reclaim.
4809 */
4810 static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
4811 {
4812 bool success;
4813 unsigned long nr_to_scan;
4814 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4815 DEFINE_MAX_SEQ(lruvec);
4816
4817 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
4818 return -1;
4819
4820 success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
4821
4822 /* try to scrape all its memory if this memcg was deleted */
4823 if (nr_to_scan && !mem_cgroup_online(memcg))
4824 return nr_to_scan;
4825
4826 nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
4827
4828 /* try to get away with not aging at the default priority */
4829 if (!success || sc->priority == DEF_PRIORITY)
4830 return nr_to_scan >> sc->priority;
4831
4832 /* stop scanning this lruvec as it's low on cold folios */
4833 return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
4834 }
4835
4836 static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
4837 {
4838 int i;
4839 enum zone_watermarks mark;
4840
4841 /* don't abort memcg reclaim to ensure fairness */
4842 if (!root_reclaim(sc))
4843 return false;
4844
4845 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order)))
4846 return true;
4847
4848 /* check the order to exclude compaction-induced reclaim */
4849 if (!current_is_kswapd() || sc->order)
4850 return false;
4851
4852 mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ?
4853 WMARK_PROMO : WMARK_HIGH;
4854
4855 for (i = 0; i <= sc->reclaim_idx; i++) {
4856 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
4857 unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;
4858
4859 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
4860 return false;
4861 }
4862
4863 /* kswapd should abort if all eligible zones are safe */
4864 return true;
4865 }
4866
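/*
 * Repeatedly pick a scan target and evict from this lruvec until the
 * target is met, no more progress can be made, or should_abort_scan()
 * says to stop. The return value indicates whether the lruvec should be
 * rotated on the memcg LRU.
 */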
4867 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4868 {
4869 long nr_to_scan;
4870 unsigned long scanned = 0;
4871 int swappiness = get_swappiness(lruvec, sc);
4872
4873 while (true) {
4874 int delta;
4875
4876 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
4877 if (nr_to_scan <= 0)
4878 break;
4879
4880 delta = evict_folios(nr_to_scan, lruvec, sc, swappiness);
4881 if (!delta)
4882 break;
4883
4884 scanned += delta;
4885 if (scanned >= nr_to_scan)
4886 break;
4887
4888 if (should_abort_scan(lruvec, sc))
4889 break;
4890
4891 cond_resched();
4892 }
4893
4894 /*
4895 * If too much file cache in the coldest generation can't be evicted
4896 * because it is dirty, wake up the flusher.
4897 */
4898 if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken)
4899 wakeup_flusher_threads(WB_REASON_VMSCAN);
4900
4901 /* whether this lruvec should be rotated */
4902 return nr_to_scan < 0;
4903 }
4904
4905 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
4906 {
4907 bool success;
4908 unsigned long scanned = sc->nr_scanned;
4909 unsigned long reclaimed = sc->nr_reclaimed;
4910 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4911 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4912
4913 /* lru_gen_age_node() called mem_cgroup_calculate_protection() */
4914 if (mem_cgroup_below_min(NULL, memcg))
4915 return MEMCG_LRU_YOUNG;
4916
4917 if (mem_cgroup_below_low(NULL, memcg)) {
4918 /* see the comment on MEMCG_NR_GENS */
4919 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
4920 return MEMCG_LRU_TAIL;
4921
4922 memcg_memory_event(memcg, MEMCG_LOW);
4923 }
4924
4925 success = try_to_shrink_lruvec(lruvec, sc);
4926
4927 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
4928
4929 if (!sc->proactive)
4930 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
4931 sc->nr_reclaimed - reclaimed);
4932
4933 flush_reclaim_state(sc);
4934
4935 if (success && mem_cgroup_online(memcg))
4936 return MEMCG_LRU_YOUNG;
4937
4938 if (!success && lruvec_is_sizable(lruvec, sc))
4939 return 0;
4940
4941 /* one retry if offlined or too small */
4942 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ?
4943 MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
4944 }
4945
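/*
 * Walk the per-node memcg LRU: starting from a random bin of the oldest
 * generation, shrink one memcg at a time and rotate it according to the
 * result of shrink_one(). Restarts when it races with
 * lru_gen_rotate_memcg(), and otherwise tries the remaining bins of the
 * current generation.
 */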
4946 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
4947 {
4948 int op;
4949 int gen;
4950 int bin;
4951 int first_bin;
4952 struct lruvec *lruvec;
4953 struct lru_gen_folio *lrugen;
4954 struct mem_cgroup *memcg;
4955 struct hlist_nulls_node *pos;
4956
4957 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
4958 bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
4959 restart:
4960 op = 0;
4961 memcg = NULL;
4962
4963 rcu_read_lock();
4964
4965 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
4966 if (op) {
4967 lru_gen_rotate_memcg(lruvec, op);
4968 op = 0;
4969 }
4970
4971 mem_cgroup_put(memcg);
4972 memcg = NULL;
4973
4974 if (gen != READ_ONCE(lrugen->gen))
4975 continue;
4976
4977 lruvec = container_of(lrugen, struct lruvec, lrugen);
4978 memcg = lruvec_memcg(lruvec);
4979
4980 if (!mem_cgroup_tryget(memcg)) {
4981 lru_gen_release_memcg(memcg);
4982 memcg = NULL;
4983 continue;
4984 }
4985
4986 rcu_read_unlock();
4987
4988 op = shrink_one(lruvec, sc);
4989
4990 rcu_read_lock();
4991
4992 if (should_abort_scan(lruvec, sc))
4993 break;
4994 }
4995
4996 rcu_read_unlock();
4997
4998 if (op)
4999 lru_gen_rotate_memcg(lruvec, op);
5000
5001 mem_cgroup_put(memcg);
5002
5003 if (!is_a_nulls(pos))
5004 return;
5005
5006 /* restart if raced with lru_gen_rotate_memcg() */
5007 if (gen != get_nulls_value(pos))
5008 goto restart;
5009
5010 /* try the rest of the bins of the current generation */
5011 bin = get_memcg_bin(bin + 1);
5012 if (bin != first_bin)
5013 goto restart;
5014 }
5015
5016 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5017 {
5018 struct blk_plug plug;
5019
5020 VM_WARN_ON_ONCE(root_reclaim(sc));
5021 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap);
5022
5023 lru_add_drain();
5024
5025 blk_start_plug(&plug);
5026
5027 set_mm_walk(NULL, sc->proactive);
5028
5029 if (try_to_shrink_lruvec(lruvec, sc))
5030 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
5031
5032 clear_mm_walk();
5033
5034 blk_finish_plug(&plug);
5035 }
5036
5037 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
5038 {
5039 struct blk_plug plug;
5040 unsigned long reclaimed = sc->nr_reclaimed;
5041
5042 VM_WARN_ON_ONCE(!root_reclaim(sc));
5043
5044 /*
5045 * Unmapped clean folios are already prioritized. Scanning for more of
5046 * them is likely futile and can cause high reclaim latency when there
5047 * is a large number of memcgs.
5048 */
5049 if (!sc->may_writepage || !sc->may_unmap)
5050 goto done;
5051
5052 lru_add_drain();
5053
5054 blk_start_plug(&plug);
5055
5056 set_mm_walk(pgdat, sc->proactive);
5057
5058 set_initial_priority(pgdat, sc);
5059
5060 if (current_is_kswapd())
5061 sc->nr_reclaimed = 0;
5062
5063 if (mem_cgroup_disabled())
5064 shrink_one(&pgdat->__lruvec, sc);
5065 else
5066 shrink_many(pgdat, sc);
5067
5068 if (current_is_kswapd())
5069 sc->nr_reclaimed += reclaimed;
5070
5071 clear_mm_walk();
5072
5073 blk_finish_plug(&plug);
5074 done:
5075 if (sc->nr_reclaimed > reclaimed)
5076 kswapd_try_clear_hopeless(pgdat, sc->order, sc->reclaim_idx);
5077 }
5078
5079 /******************************************************************************
5080 * state change
5081 ******************************************************************************/
5082
5083 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
5084 {
5085 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5086
5087 if (lrugen->enabled) {
5088 enum lru_list lru;
5089
5090 for_each_evictable_lru(lru) {
5091 if (!list_empty(&lruvec->lists[lru]))
5092 return false;
5093 }
5094 } else {
5095 int gen, type, zone;
5096
5097 for_each_gen_type_zone(gen, type, zone) {
5098 if (!list_empty(&lrugen->folios[gen][type][zone]))
5099 return false;
5100 }
5101 }
5102
5103 return true;
5104 }
5105
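/*
 * Move folios from the conventional active/inactive lists onto the MGLRU
 * lists when lru_gen is being enabled, at most MAX_LRU_BATCH at a time;
 * drain_evictable() below does the reverse when it is being disabled.
 * Returns false if more work is left.
 */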
5106 static bool fill_evictable(struct lruvec *lruvec)
5107 {
5108 enum lru_list lru;
5109 int remaining = MAX_LRU_BATCH;
5110
5111 for_each_evictable_lru(lru) {
5112 int type = is_file_lru(lru);
5113 bool active = is_active_lru(lru);
5114 struct list_head *head = &lruvec->lists[lru];
5115
5116 while (!list_empty(head)) {
5117 bool success;
5118 struct folio *folio = lru_to_folio(head);
5119
5120 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5121 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio);
5122 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5123 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio);
5124
5125 lruvec_del_folio(lruvec, folio);
5126 success = lru_gen_add_folio(lruvec, folio, false);
5127 VM_WARN_ON_ONCE(!success);
5128
5129 if (!--remaining)
5130 return false;
5131 }
5132 }
5133
5134 return true;
5135 }
5136
5137 static bool drain_evictable(struct lruvec *lruvec)
5138 {
5139 int gen, type, zone;
5140 int remaining = MAX_LRU_BATCH;
5141
5142 for_each_gen_type_zone(gen, type, zone) {
5143 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
5144
5145 while (!list_empty(head)) {
5146 bool success;
5147 struct folio *folio = lru_to_folio(head);
5148
5149 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
5150 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
5151 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
5152 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
5153
5154 success = lru_gen_del_folio(lruvec, folio, false);
5155 VM_WARN_ON_ONCE(!success);
5156 lruvec_add_folio(lruvec, folio);
5157
5158 if (!--remaining)
5159 return false;
5160 }
5161 }
5162
5163 return true;
5164 }
5165
5166 static void lru_gen_change_state(bool enabled)
5167 {
5168 static DEFINE_MUTEX(state_mutex);
5169
5170 struct mem_cgroup *memcg;
5171
5172 cgroup_lock();
5173 cpus_read_lock();
5174 get_online_mems();
5175 mutex_lock(&state_mutex);
5176
5177 if (enabled == lru_gen_enabled())
5178 goto unlock;
5179
5180 if (enabled)
5181 static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5182 else
5183 static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
5184
5185 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5186 do {
5187 int nid;
5188
5189 for_each_node(nid) {
5190 struct lruvec *lruvec = get_lruvec(memcg, nid);
5191
5192 spin_lock_irq(&lruvec->lru_lock);
5193
5194 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
5195 VM_WARN_ON_ONCE(!state_is_valid(lruvec));
5196
5197 lruvec->lrugen.enabled = enabled;
5198
5199 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
5200 spin_unlock_irq(&lruvec->lru_lock);
5201 cond_resched();
5202 spin_lock_irq(&lruvec->lru_lock);
5203 }
5204
5205 spin_unlock_irq(&lruvec->lru_lock);
5206 }
5207
5208 cond_resched();
5209 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5210 unlock:
5211 mutex_unlock(&state_mutex);
5212 put_online_mems();
5213 cpus_read_unlock();
5214 cgroup_unlock();
5215 }
5216
5217 /******************************************************************************
5218 * sysfs interface
5219 ******************************************************************************/
5220
5221 static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5222 {
5223 return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
5224 }
5225
5226 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5227 static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
5228 const char *buf, size_t len)
5229 {
5230 unsigned int msecs;
5231
5232 if (kstrtouint(buf, 0, &msecs))
5233 return -EINVAL;
5234
5235 WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
5236
5237 return len;
5238 }
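/*
 * Illustrative usage: the attribute group below is registered against
 * mm_kobj as "lru_gen", so this file normally appears as
 * /sys/kernel/mm/lru_gen/min_ttl_ms, e.g.
 *
 *   echo 1000 > /sys/kernel/mm/lru_gen/min_ttl_ms
 *
 * Values are stored in jiffies and reported back in milliseconds; per the
 * documentation referenced above, this asks the aging to protect roughly the
 * last 1000 ms of the working set from eviction.
 */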
5239
5240 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms);
5241
5242 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
5243 {
5244 unsigned int caps = 0;
5245
5246 if (get_cap(LRU_GEN_CORE))
5247 caps |= BIT(LRU_GEN_CORE);
5248
5249 if (should_walk_mmu())
5250 caps |= BIT(LRU_GEN_MM_WALK);
5251
5252 if (should_clear_pmd_young())
5253 caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
5254
5255 return sysfs_emit(buf, "0x%04x\n", caps);
5256 }
5257
5258 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5259 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
5260 const char *buf, size_t len)
5261 {
5262 int i;
5263 unsigned int caps;
5264
5265 if (tolower(*buf) == 'n')
5266 caps = 0;
5267 else if (tolower(*buf) == 'y')
5268 caps = -1;
5269 else if (kstrtouint(buf, 0, &caps))
5270 return -EINVAL;
5271
5272 for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
5273 bool enabled = caps & BIT(i);
5274
5275 if (i == LRU_GEN_CORE)
5276 lru_gen_change_state(enabled);
5277 else if (enabled)
5278 static_branch_enable(&lru_gen_caps[i]);
5279 else
5280 static_branch_disable(&lru_gen_caps[i]);
5281 }
5282
5283 return len;
5284 }
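/*
 * Illustrative usage, assuming the usual capability order (LRU_GEN_CORE = 0,
 * LRU_GEN_MM_WALK = 1, LRU_GEN_NONLEAF_YOUNG = 2) implied by enabled_show():
 *
 *   echo y > /sys/kernel/mm/lru_gen/enabled       # enable all capabilities
 *   echo 0x0001 > /sys/kernel/mm/lru_gen/enabled  # core only
 *
 * "y"/"n" map to an all-ones/zero mask above; anything else is parsed as a
 * bitmask (kstrtouint() with base 0).
 */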
5285
5286 static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled);
5287
5288 static struct attribute *lru_gen_attrs[] = {
5289 &lru_gen_min_ttl_attr.attr,
5290 &lru_gen_enabled_attr.attr,
5291 NULL
5292 };
5293
5294 static const struct attribute_group lru_gen_attr_group = {
5295 .name = "lru_gen",
5296 .attrs = lru_gen_attrs,
5297 };
5298
5299 /******************************************************************************
5300 * debugfs interface
5301 ******************************************************************************/
5302
5303 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
5304 {
5305 struct mem_cgroup *memcg;
5306 loff_t nr_to_skip = *pos;
5307
5308 m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
5309 if (!m->private)
5310 return ERR_PTR(-ENOMEM);
5311
5312 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5313 do {
5314 int nid;
5315
5316 for_each_node_state(nid, N_MEMORY) {
5317 if (!nr_to_skip--)
5318 return get_lruvec(memcg, nid);
5319 }
5320 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5321
5322 return NULL;
5323 }
5324
5325 static void lru_gen_seq_stop(struct seq_file *m, void *v)
5326 {
5327 if (!IS_ERR_OR_NULL(v))
5328 mem_cgroup_iter_break(NULL, lruvec_memcg(v));
5329
5330 kvfree(m->private);
5331 m->private = NULL;
5332 }
5333
5334 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
5335 {
5336 int nid = lruvec_pgdat(v)->node_id;
5337 struct mem_cgroup *memcg = lruvec_memcg(v);
5338
5339 ++*pos;
5340
5341 nid = next_memory_node(nid);
5342 if (nid == MAX_NUMNODES) {
5343 memcg = mem_cgroup_iter(NULL, memcg, NULL);
5344 if (!memcg)
5345 return NULL;
5346
5347 nid = first_memory_node;
5348 }
5349
5350 return get_lruvec(memcg, nid);
5351 }
5352
5353 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
5354 unsigned long max_seq, unsigned long *min_seq,
5355 unsigned long seq)
5356 {
5357 int i;
5358 int type, tier;
5359 int hist = lru_hist_from_seq(seq);
5360 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5361 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
5362
5363 for (tier = 0; tier < MAX_NR_TIERS; tier++) {
5364 seq_printf(m, " %10d", tier);
5365 for (type = 0; type < ANON_AND_FILE; type++) {
5366 const char *s = "xxx";
5367 unsigned long n[3] = {};
5368
5369 if (seq == max_seq) {
5370 s = "RTx";
5371 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
5372 n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
5373 } else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
5374 s = "rep";
5375 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
5376 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
5377 n[2] = READ_ONCE(lrugen->protected[hist][type][tier]);
5378 }
5379
5380 for (i = 0; i < 3; i++)
5381 seq_printf(m, " %10lu%c", n[i], s[i]);
5382 }
5383 seq_putc(m, '\n');
5384 }
5385
5386 if (!mm_state)
5387 return;
5388
5389 seq_puts(m, " ");
5390 for (i = 0; i < NR_MM_STATS; i++) {
5391 const char *s = "xxxx";
5392 unsigned long n = 0;
5393
5394 if (seq == max_seq && NR_HIST_GENS == 1) {
5395 s = "TYFA";
5396 n = READ_ONCE(mm_state->stats[hist][i]);
5397 } else if (seq != max_seq && NR_HIST_GENS > 1) {
5398 s = "tyfa";
5399 n = READ_ONCE(mm_state->stats[hist][i]);
5400 }
5401
5402 seq_printf(m, " %10lu%c", n, s[i]);
5403 }
5404 seq_putc(m, '\n');
5405 }
5406
5407 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5408 static int lru_gen_seq_show(struct seq_file *m, void *v)
5409 {
5410 unsigned long seq;
5411 bool full = debugfs_get_aux_num(m->file);
5412 struct lruvec *lruvec = v;
5413 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5414 int nid = lruvec_pgdat(lruvec)->node_id;
5415 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5416 DEFINE_MAX_SEQ(lruvec);
5417 DEFINE_MIN_SEQ(lruvec);
5418
5419 if (nid == first_memory_node) {
5420 const char *path = memcg ? m->private : "";
5421
5422 #ifdef CONFIG_MEMCG
5423 if (memcg)
5424 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
5425 #endif
5426 seq_printf(m, "memcg %llu %s\n", mem_cgroup_id(memcg), path);
5427 }
5428
5429 seq_printf(m, " node %5d\n", nid);
5430
5431 if (!full)
5432 seq = evictable_min_seq(min_seq, MAX_SWAPPINESS / 2);
5433 else if (max_seq >= MAX_NR_GENS)
5434 seq = max_seq - MAX_NR_GENS + 1;
5435 else
5436 seq = 0;
5437
5438 for (; seq <= max_seq; seq++) {
5439 int type, zone;
5440 int gen = lru_gen_from_seq(seq);
5441 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
5442
5443 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
5444
5445 for (type = 0; type < ANON_AND_FILE; type++) {
5446 unsigned long size = 0;
5447 char mark = full && seq < min_seq[type] ? 'x' : ' ';
5448
5449 for (zone = 0; zone < MAX_NR_ZONES; zone++)
5450 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
5451
5452 seq_printf(m, " %10lu%c", size, mark);
5453 }
5454
5455 seq_putc(m, '\n');
5456
5457 if (full)
5458 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
5459 }
5460
5461 return 0;
5462 }
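/*
 * Output sketch for lru_gen_seq_show() above, one block per memcg/node pair:
 *
 *   memcg <memcg_id> <cgroup path>
 *    node <nid>
 *     <seq> <age_in_ms> <nr_anon_pages> <nr_file_pages>
 *     ...
 *
 * In the full (debugfs "lru_gen_full") variant, an 'x' marks generations
 * below min_seq and lru_gen_seq_show_full() appends the per-tier refault
 * stats and the mm walk stats.
 */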
5463
5464 static const struct seq_operations lru_gen_seq_ops = {
5465 .start = lru_gen_seq_start,
5466 .stop = lru_gen_seq_stop,
5467 .next = lru_gen_seq_next,
5468 .show = lru_gen_seq_show,
5469 };
5470
5471 static int run_aging(struct lruvec *lruvec, unsigned long seq,
5472 int swappiness, bool force_scan)
5473 {
5474 DEFINE_MAX_SEQ(lruvec);
5475
5476 if (seq > max_seq)
5477 return -EINVAL;
5478
5479 return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST;
5480 }
5481
5482 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5483 int swappiness, unsigned long nr_to_reclaim)
5484 {
5485 DEFINE_MAX_SEQ(lruvec);
5486
5487 if (seq + MIN_NR_GENS > max_seq)
5488 return -EINVAL;
5489
5490 sc->nr_reclaimed = 0;
5491
5492 while (!signal_pending(current)) {
5493 DEFINE_MIN_SEQ(lruvec);
5494
5495 if (seq < evictable_min_seq(min_seq, swappiness))
5496 return 0;
5497
5498 if (sc->nr_reclaimed >= nr_to_reclaim)
5499 return 0;
5500
5501 if (!evict_folios(nr_to_reclaim - sc->nr_reclaimed, lruvec, sc,
5502 swappiness))
5503 return 0;
5504
5505 cond_resched();
5506 }
5507
5508 return -EINTR;
5509 }
5510
5511 static int run_cmd(char cmd, u64 memcg_id, int nid, unsigned long seq,
5512 struct scan_control *sc, int swappiness, unsigned long opt)
5513 {
5514 struct lruvec *lruvec;
5515 int err = -EINVAL;
5516 struct mem_cgroup *memcg = NULL;
5517
5518 if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
5519 return -EINVAL;
5520
5521 if (!mem_cgroup_disabled()) {
5522 memcg = mem_cgroup_get_from_id(memcg_id);
5523 if (!memcg)
5524 return -EINVAL;
5525 }
5526
5527 if (memcg_id != mem_cgroup_id(memcg))
5528 goto done;
5529
5530 sc->target_mem_cgroup = memcg;
5531 lruvec = get_lruvec(memcg, nid);
5532
5533 if (swappiness < MIN_SWAPPINESS)
5534 swappiness = get_swappiness(lruvec, sc);
5535 else if (swappiness > SWAPPINESS_ANON_ONLY)
5536 goto done;
5537
5538 switch (cmd) {
5539 case '+':
5540 err = run_aging(lruvec, seq, swappiness, opt);
5541 break;
5542 case '-':
5543 err = run_eviction(lruvec, seq, sc, swappiness, opt);
5544 break;
5545 }
5546 done:
5547 mem_cgroup_put(memcg);
5548
5549 return err;
5550 }
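/*
 * The write side (lru_gen_seq_write() below) accepts one command per line:
 *
 *   <cmd> <memcg_id> <node_id> <seq> [<swappiness>|max [<opt>]]
 *
 * where '+' runs the aging up to <seq> (<opt> being force_scan) and '-' runs
 * the eviction up to <seq> (<opt> being nr_to_reclaim); <memcg_id> is the id
 * printed by the read side, and "max" selects anon-only reclaim
 * (SWAPPINESS_ANON_ONLY). See the referenced documentation for the
 * authoritative format.
 */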
5551
5552 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5553 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
5554 size_t len, loff_t *pos)
5555 {
5556 void *buf;
5557 char *cur, *next;
5558 unsigned int flags;
5559 struct blk_plug plug;
5560 int err = -EINVAL;
5561 struct scan_control sc = {
5562 .may_writepage = true,
5563 .may_unmap = true,
5564 .may_swap = true,
5565 .reclaim_idx = MAX_NR_ZONES - 1,
5566 .gfp_mask = GFP_KERNEL,
5567 .proactive = true,
5568 };
5569
5570 buf = kvmalloc(len + 1, GFP_KERNEL);
5571 if (!buf)
5572 return -ENOMEM;
5573
5574 if (copy_from_user(buf, src, len)) {
5575 kvfree(buf);
5576 return -EFAULT;
5577 }
5578
5579 set_task_reclaim_state(current, &sc.reclaim_state);
5580 flags = memalloc_noreclaim_save();
5581 blk_start_plug(&plug);
5582 if (!set_mm_walk(NULL, true)) {
5583 err = -ENOMEM;
5584 goto done;
5585 }
5586
5587 next = buf;
5588 next[len] = '\0';
5589
5590 while ((cur = strsep(&next, ",;\n"))) {
5591 int n;
5592 int end;
5593 char cmd, swap_string[5];
5594 u64 memcg_id;
5595 unsigned int nid;
5596 unsigned long seq;
5597 unsigned int swappiness;
5598 unsigned long opt = -1;
5599
5600 cur = skip_spaces(cur);
5601 if (!*cur)
5602 continue;
5603
5604 n = sscanf(cur, "%c %llu %u %lu %n %4s %n %lu %n", &cmd, &memcg_id, &nid,
5605 &seq, &end, swap_string, &end, &opt, &end);
5606 if (n < 4 || cur[end]) {
5607 err = -EINVAL;
5608 break;
5609 }
5610
5611 if (n == 4) {
5612 swappiness = -1;
5613 } else if (!strcmp("max", swap_string)) {
5614 /* set by userspace for anonymous memory only */
5615 swappiness = SWAPPINESS_ANON_ONLY;
5616 } else {
5617 err = kstrtouint(swap_string, 0, &swappiness);
5618 if (err)
5619 break;
5620 }
5621
5622 err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
5623 if (err)
5624 break;
5625 }
5626 done:
5627 clear_mm_walk();
5628 blk_finish_plug(&plug);
5629 memalloc_noreclaim_restore(flags);
5630 set_task_reclaim_state(current, NULL);
5631
5632 kvfree(buf);
5633
5634 return err ? : len;
5635 }
5636
5637 static int lru_gen_seq_open(struct inode *inode, struct file *file)
5638 {
5639 return seq_open(file, &lru_gen_seq_ops);
5640 }
5641
5642 static const struct file_operations lru_gen_rw_fops = {
5643 .open = lru_gen_seq_open,
5644 .read = seq_read,
5645 .write = lru_gen_seq_write,
5646 .llseek = seq_lseek,
5647 .release = seq_release,
5648 };
5649
5650 static const struct file_operations lru_gen_ro_fops = {
5651 .open = lru_gen_seq_open,
5652 .read = seq_read,
5653 .llseek = seq_lseek,
5654 .release = seq_release,
5655 };
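/*
 * init_lru_gen() below exposes these as debugfs files (normally under
 * /sys/kernel/debug): "lru_gen" is writable and accepts the commands parsed
 * by lru_gen_seq_write(), "lru_gen_full" is read-only; the aux number passed
 * at creation selects the full output in lru_gen_seq_show().
 */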
5656
5657 /******************************************************************************
5658 * initialization
5659 ******************************************************************************/
5660
5661 void lru_gen_init_pgdat(struct pglist_data *pgdat)
5662 {
5663 int i, j;
5664
5665 spin_lock_init(&pgdat->memcg_lru.lock);
5666
5667 for (i = 0; i < MEMCG_NR_GENS; i++) {
5668 for (j = 0; j < MEMCG_NR_BINS; j++)
5669 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
5670 }
5671 }
5672
5673 void lru_gen_init_lruvec(struct lruvec *lruvec)
5674 {
5675 int i;
5676 int gen, type, zone;
5677 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5678 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
5679
5680 lrugen->max_seq = MIN_NR_GENS + 1;
5681 lrugen->enabled = lru_gen_enabled();
5682
5683 for (i = 0; i <= MIN_NR_GENS + 1; i++)
5684 lrugen->timestamps[i] = jiffies;
5685
5686 for_each_gen_type_zone(gen, type, zone)
5687 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
5688
5689 if (mm_state)
5690 mm_state->seq = MIN_NR_GENS;
5691 }
5692
5693 #ifdef CONFIG_MEMCG
5694
5695 void lru_gen_init_memcg(struct mem_cgroup *memcg)
5696 {
5697 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
5698
5699 if (!mm_list)
5700 return;
5701
5702 INIT_LIST_HEAD(&mm_list->fifo);
5703 spin_lock_init(&mm_list->lock);
5704 }
5705
5706 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
5707 {
5708 int i;
5709 int nid;
5710 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
5711
5712 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo));
5713
5714 for_each_node(nid) {
5715 struct lruvec *lruvec = get_lruvec(memcg, nid);
5716 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
5717
5718 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
5719 sizeof(lruvec->lrugen.nr_pages)));
5720
5721 lruvec->lrugen.list.next = LIST_POISON1;
5722
5723 if (!mm_state)
5724 continue;
5725
5726 for (i = 0; i < NR_BLOOM_FILTERS; i++) {
5727 bitmap_free(mm_state->filters[i]);
5728 mm_state->filters[i] = NULL;
5729 }
5730 }
5731 }
5732
5733 #endif /* CONFIG_MEMCG */
5734
5735 static int __init init_lru_gen(void)
5736 {
5737 BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
5738 BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
5739
5740 if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
5741 pr_err("lru_gen: failed to create sysfs group\n");
5742
5743 debugfs_create_file_aux_num("lru_gen", 0644, NULL, NULL, false,
5744 &lru_gen_rw_fops);
5745 debugfs_create_file_aux_num("lru_gen_full", 0444, NULL, NULL, true,
5746 &lru_gen_ro_fops);
5747
5748 return 0;
5749 };
5750 late_initcall(init_lru_gen);
5751
5752 #else /* !CONFIG_LRU_GEN */
5753
5754 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
5755 {
5756 BUILD_BUG();
5757 }
5758
5759 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5760 {
5761 BUILD_BUG();
5762 }
5763
5764 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
5765 {
5766 BUILD_BUG();
5767 }
5768
5769 #endif /* CONFIG_LRU_GEN */
5770
5771 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5772 {
5773 unsigned long nr[NR_LRU_LISTS];
5774 unsigned long targets[NR_LRU_LISTS];
5775 unsigned long nr_to_scan;
5776 enum lru_list lru;
5777 unsigned long nr_reclaimed = 0;
5778 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
5779 bool proportional_reclaim;
5780 struct blk_plug plug;
5781
5782 if (lru_gen_enabled() && !root_reclaim(sc)) {
5783 lru_gen_shrink_lruvec(lruvec, sc);
5784 return;
5785 }
5786
5787 get_scan_count(lruvec, sc, nr);
5788
5789 /* Record the original scan target for proportional adjustments later */
5790 memcpy(targets, nr, sizeof(nr));
5791
5792 /*
5793 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
5794 * event that can occur when there is little memory pressure, e.g. with
5795 * multiple streaming readers/writers. Hence, we do not abort scanning
5796 * when the requested number of pages has been reclaimed when scanning at
5797 * DEF_PRIORITY on the assumption that the fact we are direct
5798 * reclaiming implies that kswapd is not keeping up and it is best to
5799 * do a batch of work at once. For memcg reclaim one check is made to
5800 * abort proportional reclaim if either the file or anon lru has already
5801 * dropped to zero at the first pass.
5802 */
5803 proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
5804 sc->priority == DEF_PRIORITY);
5805
5806 blk_start_plug(&plug);
5807 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
5808 nr[LRU_INACTIVE_FILE]) {
5809 unsigned long nr_anon, nr_file, percentage;
5810 unsigned long nr_scanned;
5811
5812 for_each_evictable_lru(lru) {
5813 if (nr[lru]) {
5814 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
5815 nr[lru] -= nr_to_scan;
5816
5817 nr_reclaimed += shrink_list(lru, nr_to_scan,
5818 lruvec, sc);
5819 }
5820 }
5821
5822 cond_resched();
5823
5824 if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
5825 continue;
5826
5827 /*
5828 * For kswapd and memcg, reclaim at least the number of pages
5829 * requested. Ensure that the anon and file LRUs are scanned
5830 * proportionally to what was requested by get_scan_count(). We
5831 * stop reclaiming one LRU and reduce the amount of scanning in
5832 * proportion to the original scan target.
5833 */
5834 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
5835 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
5836
5837 /*
5838 * It's just vindictive to attack the larger once the smaller
5839 * has gone to zero. And given the way we stop scanning the
5840 * smaller below, this makes sure that we only make one nudge
5841 * towards proportionality once we've got nr_to_reclaim.
5842 */
5843 if (!nr_file || !nr_anon)
5844 break;
5845
5846 if (nr_file > nr_anon) {
5847 unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
5848 targets[LRU_ACTIVE_ANON] + 1;
5849 lru = LRU_BASE;
5850 percentage = nr_anon * 100 / scan_target;
5851 } else {
5852 unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
5853 targets[LRU_ACTIVE_FILE] + 1;
5854 lru = LRU_FILE;
5855 percentage = nr_file * 100 / scan_target;
5856 }
5857
5858 /* Stop scanning the smaller of the LRU */
5859 nr[lru] = 0;
5860 nr[lru + LRU_ACTIVE] = 0;
5861
5862 /*
5863 * Recalculate the other LRU scan count based on its original
5864 * scan target and the percentage scanning already complete
5865 */
5866 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
5867 nr_scanned = targets[lru] - nr[lru];
5868 nr[lru] = targets[lru] * (100 - percentage) / 100;
5869 nr[lru] -= min(nr[lru], nr_scanned);
5870
5871 lru += LRU_ACTIVE;
5872 nr_scanned = targets[lru] - nr[lru];
5873 nr[lru] = targets[lru] * (100 - percentage) / 100;
5874 nr[lru] -= min(nr[lru], nr_scanned);
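		/*
		 * Worked example with illustrative numbers, ignoring the
		 * active file list for brevity: with anon targets totalling
		 * 100 and an inactive file target of 400, suppose
		 * nr_to_reclaim is met when 25 anon and 300 file pages remain.
		 * File is larger, so anon scanning stops; percentage is
		 * 25 * 100 / 101 = 24, i.e. ~76% of the anon target was
		 * scanned. The remaining file count is then rescaled to
		 * 400 * 76% = 304 minus the 100 already scanned, i.e. 204,
		 * so both LRUs end up scanned to roughly the same proportion.
		 */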
5875 }
5876 blk_finish_plug(&plug);
5877 sc->nr_reclaimed += nr_reclaimed;
5878
5879 /*
5880 * Even if we did not try to evict anon pages at all, we want to
5881 * rebalance the anon lru active/inactive ratio.
5882 */
5883 if (can_age_anon_pages(lruvec, sc) &&
5884 inactive_is_low(lruvec, LRU_INACTIVE_ANON))
5885 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
5886 sc, LRU_ACTIVE_ANON);
5887 }
5888
5889 /* Use reclaim/compaction for costly allocs or under memory pressure */
5890 static bool in_reclaim_compaction(struct scan_control *sc)
5891 {
5892 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
5893 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
5894 sc->priority < DEF_PRIORITY - 2))
5895 return true;
5896
5897 return false;
5898 }
5899
5900 /*
5901 * Reclaim/compaction is used for high-order allocation requests. It reclaims
5902 * order-0 pages before compacting the zone. should_continue_reclaim() returns
5903 * true if more pages should be reclaimed such that when the page allocator
5904 * calls try_to_compact_pages() that it will have enough free pages to succeed.
5905 * It will give up earlier than that if there is difficulty reclaiming pages.
5906 */
5907 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
5908 unsigned long nr_reclaimed,
5909 struct scan_control *sc)
5910 {
5911 unsigned long pages_for_compaction;
5912 unsigned long inactive_lru_pages;
5913 int z;
5914 struct zone *zone;
5915
5916 /* If not in reclaim/compaction mode, stop */
5917 if (!in_reclaim_compaction(sc))
5918 return false;
5919
5920 /*
5921 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
5922 * number of pages that were scanned. This will return to the caller
5923 * with the risk that reclaim/compaction and the resulting allocation
5924 * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
5925 * allocations through requiring that the full LRU list has been scanned
5926 * first, by assuming that zero delta of sc->nr_scanned means full LRU
5927 * scan, but that approximation was wrong, and there were corner cases
5928 * where always a non-zero amount of pages were scanned.
5929 */
5930 if (!nr_reclaimed)
5931 return false;
5932
5933 /* If compaction would go ahead or the allocation would succeed, stop */
5934 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
5935 unsigned long watermark = min_wmark_pages(zone);
5936
5937 /* Allocation can already succeed, nothing to do */
5938 if (zone_watermark_ok(zone, sc->order, watermark,
5939 sc->reclaim_idx, 0))
5940 return false;
5941
5942 if (compaction_suitable(zone, sc->order, watermark,
5943 sc->reclaim_idx))
5944 return false;
5945 }
5946
5947 /*
5948 * If we have not reclaimed enough pages for compaction and the
5949 * inactive lists are large enough, continue reclaiming
5950 */
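	/*
	 * compact_gap() is roughly twice the allocation size, leaving room
	 * both for the request itself and for migration targets (see its
	 * definition in mm/internal.h).
	 */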
5951 pages_for_compaction = compact_gap(sc->order);
5952 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
5953 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
5954 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
5955
5956 return inactive_lru_pages > pages_for_compaction;
5957 }
5958
5959 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
5960 {
5961 struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
5962 struct mem_cgroup_reclaim_cookie reclaim = {
5963 .pgdat = pgdat,
5964 };
5965 struct mem_cgroup_reclaim_cookie *partial = &reclaim;
5966 struct mem_cgroup *memcg;
5967
5968 /*
5969 * In most cases, direct reclaimers can do partial walks
5970 * through the cgroup tree, using an iterator state that
5971 * persists across invocations. This strikes a balance between
5972 * fairness and allocation latency.
5973 *
5974 * For kswapd, reliable forward progress is more important
5975 * than a quick return to idle. Always do full walks.
5976 */
5977 if (current_is_kswapd() || sc->memcg_full_walk)
5978 partial = NULL;
5979
5980 memcg = mem_cgroup_iter(target_memcg, NULL, partial);
5981 do {
5982 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
5983 unsigned long reclaimed;
5984 unsigned long scanned;
5985
5986 /*
5987 * This loop can become CPU-bound when target memcgs
5988 * aren't eligible for reclaim - either because they
5989 * don't have any reclaimable pages, or because their
5990 * memory is explicitly protected. Avoid soft lockups.
5991 */
5992 cond_resched();
5993
5994 mem_cgroup_calculate_protection(target_memcg, memcg);
5995
5996 if (mem_cgroup_below_min(target_memcg, memcg)) {
5997 /*
5998 * Hard protection.
5999 * If there is no reclaimable memory, OOM.
6000 */
6001 continue;
6002 } else if (mem_cgroup_below_low(target_memcg, memcg)) {
6003 /*
6004 * Soft protection.
6005 * Respect the protection only as long as
6006 * there is an unprotected supply
6007 * of reclaimable memory from other cgroups.
6008 */
6009 if (!sc->memcg_low_reclaim) {
6010 sc->memcg_low_skipped = 1;
6011 continue;
6012 }
6013 memcg_memory_event(memcg, MEMCG_LOW);
6014 }
6015
6016 reclaimed = sc->nr_reclaimed;
6017 scanned = sc->nr_scanned;
6018
6019 shrink_lruvec(lruvec, sc);
6020
6021 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
6022 sc->priority);
6023
6024 /* Record the group's reclaim efficiency */
6025 if (!sc->proactive)
6026 vmpressure(sc->gfp_mask, memcg, false,
6027 sc->nr_scanned - scanned,
6028 sc->nr_reclaimed - reclaimed);
6029
6030 /* If partial walks are allowed, bail once goal is reached */
6031 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) {
6032 mem_cgroup_iter_break(target_memcg, memcg);
6033 break;
6034 }
6035 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, partial)));
6036 }
6037
6038 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
6039 {
6040 unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed;
6041 struct lruvec *target_lruvec;
6042 bool reclaimable = false;
6043
6044 if (lru_gen_enabled() && root_reclaim(sc)) {
6045 memset(&sc->nr, 0, sizeof(sc->nr));
6046 lru_gen_shrink_node(pgdat, sc);
6047 return;
6048 }
6049
6050 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
6051
6052 again:
6053 memset(&sc->nr, 0, sizeof(sc->nr));
6054
6055 nr_reclaimed = sc->nr_reclaimed;
6056 nr_scanned = sc->nr_scanned;
6057
6058 prepare_scan_control(pgdat, sc);
6059
6060 shrink_node_memcgs(pgdat, sc);
6061
6062 flush_reclaim_state(sc);
6063
6064 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed;
6065
6066 /* Record the subtree's reclaim efficiency */
6067 if (!sc->proactive)
6068 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
6069 sc->nr_scanned - nr_scanned, nr_node_reclaimed);
6070
6071 if (nr_node_reclaimed)
6072 reclaimable = true;
6073
6074 if (current_is_kswapd()) {
6075 /*
6076 * If reclaim is isolating dirty pages under writeback,
6077 * it implies that the long-lived page allocation rate
6078 * is exceeding the page laundering rate. Either the
6079 * global limits are not being effective at throttling
6080 * processes due to the page distribution throughout
6081 * zones or there is heavy usage of a slow backing
6082 * device. The only option is to throttle from reclaim
6083 * context which is not ideal as there is no guarantee
6084 * the dirtying process is throttled in the same way
6085 * balance_dirty_pages() manages.
6086 *
6087 * Once a node is flagged PGDAT_WRITEBACK, kswapd will
6088 * count the number of pages under writeback flagged for
6089 * immediate reclaim and stall if any are encountered
6090 * in the nr_immediate check below.
6091 */
6092 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
6093 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
6094
6095 /*
6096 * If kswapd scans pages marked for immediate
6097 * reclaim and under writeback (nr_immediate), it
6098 * implies that pages are cycling through the LRU
6099 * faster than they are written so forcibly stall
6100 * until some pages complete writeback.
6101 */
6102 if (sc->nr.immediate)
6103 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
6104 }
6105
6106 /*
6107 * Tag a node/memcg as congested if all the dirty pages were marked
6108 * for writeback and immediate reclaim (counted in nr.congested).
6109 *
6110 * Legacy memcg will stall in page writeback so avoid forcibly
6111 * stalling in reclaim_throttle().
6112 */
6113 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) {
6114 if (cgroup_reclaim(sc) && writeback_throttling_sane(sc))
6115 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags);
6116
6117 if (current_is_kswapd())
6118 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags);
6119 }
6120
6121 /*
6122 * Stall direct reclaim for IO completions if the lruvec or
6123 * node is congested. Allow kswapd to continue until it
6124 * starts encountering unqueued dirty pages or cycling through
6125 * the LRU too quickly.
6126 */
6127 if (!current_is_kswapd() && current_may_throttle() &&
6128 !sc->hibernation_mode &&
6129 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) ||
6130 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags)))
6131 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
6132
6133 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
6134 goto again;
6135
6136 /*
6137 * Kswapd gives up on balancing particular nodes after too
6138 * many failures to reclaim anything from them and goes to
6139 * sleep. On reclaim progress, reset the failure counter. A
6140 * successful direct reclaim run will revive a dormant kswapd.
6141 */
6142 if (reclaimable)
6143 kswapd_try_clear_hopeless(pgdat, sc->order, sc->reclaim_idx);
6144 else if (sc->cache_trim_mode)
6145 sc->cache_trim_mode_failed = 1;
6146 }
6147
6148 /*
6149 * Returns true if compaction should go ahead for a costly-order request, or
6150 * the allocation would already succeed without compaction. Return false if we
6151 * should reclaim first.
6152 */
6153 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
6154 {
6155 unsigned long watermark;
6156
6157 if (!gfp_compaction_allowed(sc->gfp_mask))
6158 return false;
6159
6160 /* Allocation can already succeed, nothing to do */
6161 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
6162 sc->reclaim_idx, 0))
6163 return true;
6164
6165 /*
6166 * Direct reclaim usually targets the min watermark, but compaction
6167 * takes time to run and there are potentially other callers using the
6168 * pages just freed. So target a higher buffer to give compaction a
6169 * reasonable chance of completing and allocating the pages.
6170 *
6171 * Note that we won't actually reclaim the whole buffer in one attempt
6172 * as the target watermark in should_continue_reclaim() is lower. But if
6173 * we are already above the high+gap watermark, don't reclaim at all.
6174 */
6175 watermark = high_wmark_pages(zone);
6176 if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx))
6177 return true;
6178
6179 return false;
6180 }
6181
6182 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
6183 {
6184 /*
6185 * If reclaim is making progress at greater than 12.5% efficiency
6186 * (nr_reclaimed > nr_scanned / 8), wake all the NOPROGRESS-throttled tasks.
6187 */
6188 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) {
6189 wait_queue_head_t *wqh;
6190
6191 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
6192 if (waitqueue_active(wqh))
6193 wake_up(wqh);
6194
6195 return;
6196 }
6197
6198 /*
6199 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
6200 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
6201 * under writeback and marked for immediate reclaim at the tail of the
6202 * LRU.
6203 */
6204 if (current_is_kswapd() || cgroup_reclaim(sc))
6205 return;
6206
6207 /* Throttle if making no progress at high priorities. */
6208 if (sc->priority == 1 && !sc->nr_reclaimed)
6209 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
6210 }
6211
6212 /*
6213 * This is the direct reclaim path, for page-allocating processes. We only
6214 * try to reclaim pages from zones which will satisfy the caller's allocation
6215 * request.
6216 *
6217 * If a zone is deemed to be full of pinned pages then just give it a light
6218 * scan then give up on it.
6219 */
6220 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
6221 {
6222 struct zoneref *z;
6223 struct zone *zone;
6224 unsigned long nr_soft_reclaimed;
6225 unsigned long nr_soft_scanned;
6226 gfp_t orig_mask;
6227 pg_data_t *last_pgdat = NULL;
6228 pg_data_t *first_pgdat = NULL;
6229
6230 /*
6231 * If the number of buffer_heads in the machine exceeds the maximum
6232 * allowed level, force direct reclaim to scan the highmem zone as
6233 * highmem pages could be pinning lowmem pages storing buffer_heads
6234 */
6235 orig_mask = sc->gfp_mask;
6236 if (buffer_heads_over_limit) {
6237 sc->gfp_mask |= __GFP_HIGHMEM;
6238 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
6239 }
6240
6241 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6242 sc->reclaim_idx, sc->nodemask) {
6243 /*
6244 * Take care that memory controller reclaim has only a small
6245 * influence on the global LRU.
6246 */
6247 if (!cgroup_reclaim(sc)) {
6248 if (!cpuset_zone_allowed(zone,
6249 GFP_KERNEL | __GFP_HARDWALL))
6250 continue;
6251
6252 /*
6253 * If we already have plenty of memory free for
6254 * compaction in this zone, don't free any more.
6255 * Even though compaction is invoked for any
6256 * non-zero order, only frequent costly order
6257 * reclamation is disruptive enough to become a
6258 * noticeable problem, like transparent huge
6259 * page allocations.
6260 */
6261 if (IS_ENABLED(CONFIG_COMPACTION) &&
6262 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
6263 compaction_ready(zone, sc)) {
6264 sc->compaction_ready = true;
6265 continue;
6266 }
6267
6268 /*
6269 * Shrink each node in the zonelist once. If the
6270 * zonelist is ordered by zone (not the default) then a
6271 * node may be shrunk multiple times but in that case
6272 * the user prefers lower zones being preserved.
6273 */
6274 if (zone->zone_pgdat == last_pgdat)
6275 continue;
6276
6277 /*
6278 * This steals pages from memory cgroups over softlimit
6279 * and returns the number of reclaimed pages and
6280 * scanned pages. This works for global memory pressure
6281 * and balancing, not for a memcg's limit.
6282 */
6283 nr_soft_scanned = 0;
6284 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat,
6285 sc->order, sc->gfp_mask,
6286 &nr_soft_scanned);
6287 sc->nr_reclaimed += nr_soft_reclaimed;
6288 sc->nr_scanned += nr_soft_scanned;
6289 /* need some check to avoid calling shrink_node() again below */
6290 }
6291
6292 if (!first_pgdat)
6293 first_pgdat = zone->zone_pgdat;
6294
6295 /* See comment about same check for global reclaim above */
6296 if (zone->zone_pgdat == last_pgdat)
6297 continue;
6298 last_pgdat = zone->zone_pgdat;
6299 shrink_node(zone->zone_pgdat, sc);
6300 }
6301
6302 if (first_pgdat)
6303 consider_reclaim_throttle(first_pgdat, sc);
6304
6305 /*
6306 * Restore to original mask to avoid the impact on the caller if we
6307 * promoted it to __GFP_HIGHMEM.
6308 */
6309 sc->gfp_mask = orig_mask;
6310 }
6311
6312 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
6313 {
6314 struct lruvec *target_lruvec;
6315 unsigned long refaults;
6316
6317 if (lru_gen_enabled())
6318 return;
6319
6320 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
6321 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
6322 target_lruvec->refaults[WORKINGSET_ANON] = refaults;
6323 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
6324 target_lruvec->refaults[WORKINGSET_FILE] = refaults;
6325 }
6326
6327 /*
6328 * This is the main entry point to direct page reclaim.
6329 *
6330 * If a full scan of the inactive list fails to free enough memory then we
6331 * are "out of memory" and something needs to be killed.
6332 *
6333 * If the caller is !__GFP_FS then the probability of a failure is reasonably
6334 * high - the zone may be full of dirty or under-writeback pages, which this
6335 * caller can't do much about. We kick the writeback threads and take explicit
6336 * naps in the hope that some of these pages can be written. But if the
6337 * allocating task holds filesystem locks which prevent writeout this might not
6338 * work, and the allocation attempt will fail.
6339 *
6340 * returns: 0, if no pages reclaimed
6341 * else, the number of pages reclaimed
6342 */
6343 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
6344 struct scan_control *sc)
6345 {
6346 int initial_priority = sc->priority;
6347 pg_data_t *last_pgdat;
6348 struct zoneref *z;
6349 struct zone *zone;
6350 retry:
6351 delayacct_freepages_start();
6352
6353 if (!cgroup_reclaim(sc))
6354 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
6355
6356 do {
6357 if (!sc->proactive)
6358 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
6359 sc->priority);
6360 sc->nr_scanned = 0;
6361 shrink_zones(zonelist, sc);
6362
6363 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
6364 break;
6365
6366 if (sc->compaction_ready)
6367 break;
6368 } while (--sc->priority >= 0);
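	/*
	 * Note: priority starts at DEF_PRIORITY (currently 12) and each step
	 * roughly doubles the scan window, since get_scan_count() scans about
	 * lruvec_size >> priority pages per LRU; priority 0 scans everything.
	 */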
6369
6370 last_pgdat = NULL;
6371 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
6372 sc->nodemask) {
6373 if (zone->zone_pgdat == last_pgdat)
6374 continue;
6375 last_pgdat = zone->zone_pgdat;
6376
6377 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
6378
6379 if (cgroup_reclaim(sc)) {
6380 struct lruvec *lruvec;
6381
6382 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
6383 zone->zone_pgdat);
6384 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
6385 }
6386 }
6387
6388 delayacct_freepages_end();
6389
6390 if (sc->nr_reclaimed)
6391 return sc->nr_reclaimed;
6392
6393 /* Aborted reclaim to try compaction? don't OOM, then */
6394 if (sc->compaction_ready)
6395 return 1;
6396
6397 /*
6398 * In most cases, direct reclaimers can do partial walks
6399 * through the cgroup tree to meet the reclaim goal while
6400 * keeping latency low. Since the iterator state is shared
6401 * among all direct reclaim invocations (to retain fairness
6402 * among cgroups), though, high concurrency can result in
6403 * individual threads not seeing enough cgroups to make
6404 * meaningful forward progress. Avoid false OOMs in this case.
6405 */
6406 if (!sc->memcg_full_walk) {
6407 sc->priority = initial_priority;
6408 sc->memcg_full_walk = 1;
6409 goto retry;
6410 }
6411
6412 /*
6413 * We make inactive:active ratio decisions based on the node's
6414 * composition of memory, but a restrictive reclaim_idx or a
6415 * memory.low cgroup setting can exempt large amounts of
6416 * memory from reclaim. Neither of which are very common, so
6417 * instead of doing costly eligibility calculations of the
6418 * entire cgroup subtree up front, we assume the estimates are
6419 * good, and retry with forcible deactivation if that fails.
6420 */
6421 if (sc->skipped_deactivate) {
6422 sc->priority = initial_priority;
6423 sc->force_deactivate = 1;
6424 sc->skipped_deactivate = 0;
6425 goto retry;
6426 }
6427
6428 /* Untapped cgroup reserves? Don't OOM, retry. */
6429 if (sc->memcg_low_skipped) {
6430 sc->priority = initial_priority;
6431 sc->force_deactivate = 0;
6432 sc->memcg_low_reclaim = 1;
6433 sc->memcg_low_skipped = 0;
6434 goto retry;
6435 }
6436
6437 return 0;
6438 }
6439
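/*
 * allow_direct_reclaim() sums the min watermarks of the usable zones up to
 * ZONE_NORMAL as the pfmemalloc reserve. Direct reclaimers are throttled in
 * throttle_direct_reclaim() once free pages drop below half of that reserve,
 * and kswapd is woken to reclaim for ZONE_NORMAL.
 */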
6440 static bool allow_direct_reclaim(pg_data_t *pgdat)
6441 {
6442 struct zone *zone;
6443 unsigned long pfmemalloc_reserve = 0;
6444 unsigned long free_pages = 0;
6445 int i;
6446 bool wmark_ok;
6447
6448 if (kswapd_test_hopeless(pgdat))
6449 return true;
6450
6451 for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
6452 if (!zone_reclaimable_pages(zone) && zone_page_state_snapshot(zone, NR_FREE_PAGES))
6453 continue;
6454
6455 pfmemalloc_reserve += min_wmark_pages(zone);
6456 free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES);
6457 }
6458
6459 /* If there are no reserves (unexpected config) then do not throttle */
6460 if (!pfmemalloc_reserve)
6461 return true;
6462
6463 wmark_ok = free_pages > pfmemalloc_reserve / 2;
6464
6465 /* kswapd must be awake if processes are being throttled */
6466 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
6467 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
6468 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
6469
6470 wake_up_interruptible(&pgdat->kswapd_wait);
6471 }
6472
6473 return wmark_ok;
6474 }
6475
6476 /*
6477 * Throttle direct reclaimers if backing storage is backed by the network
6478 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
6479 * depleted. kswapd will continue to make progress and wake the processes
6480 * when the low watermark is reached.
6481 *
6482 * Returns true if a fatal signal was delivered during throttling. If this
6483 * happens, the page allocator should not consider triggering the OOM killer.
6484 */
6485 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
6486 nodemask_t *nodemask)
6487 {
6488 struct zoneref *z;
6489 struct zone *zone;
6490 pg_data_t *pgdat = NULL;
6491
6492 /*
6493 * Kernel threads should not be throttled as they may be indirectly
6494 * responsible for cleaning pages necessary for reclaim to make forward
6495 * progress. kjournald for example may enter direct reclaim while
6496 * committing a transaction, where throttling it could force other
6497 * processes to block on log_wait_commit().
6498 */
6499 if (current->flags & PF_KTHREAD)
6500 goto out;
6501
6502 /*
6503 * If a fatal signal is pending, this process should not throttle.
6504 * It should return quickly so it can exit and free its memory
6505 */
6506 if (fatal_signal_pending(current))
6507 goto out;
6508
6509 /*
6510 * Check if the pfmemalloc reserves are ok by finding the first node
6511 * with a usable ZONE_NORMAL or lower zone. The expectation is that
6512 * GFP_KERNEL will be required for allocating network buffers when
6513 * swapping over the network so ZONE_HIGHMEM is unusable.
6514 *
6515 * Throttling is based on the first usable node and throttled processes
6516 * wait on a queue until kswapd makes progress and wakes them. There
6517 * is an affinity then between processes waking up and where reclaim
6518 * progress has been made assuming the process wakes on the same node.
6519 * More importantly, processes running on remote nodes will not compete
6520 * for remote pfmemalloc reserves and processes on different nodes
6521 * should make reasonable progress.
6522 */
6523 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6524 gfp_zone(gfp_mask), nodemask) {
6525 if (zone_idx(zone) > ZONE_NORMAL)
6526 continue;
6527
6528 /* Throttle based on the first usable node */
6529 pgdat = zone->zone_pgdat;
6530 if (allow_direct_reclaim(pgdat))
6531 goto out;
6532 break;
6533 }
6534
6535 /* If no zone was usable by the allocation flags then do not throttle */
6536 if (!pgdat)
6537 goto out;
6538
6539 /* Account for the throttling */
6540 count_vm_event(PGSCAN_DIRECT_THROTTLE);
6541
6542 /*
6543 * If the caller cannot enter the filesystem, it's possible that it
6544 * is due to the caller holding an FS lock or performing a journal
6545 * transaction in the case of a filesystem like ext[3|4]. In this case,
6546 * it is not safe to block on pfmemalloc_wait as kswapd could be
6547 * blocked waiting on the same lock. Instead, throttle for up to a
6548 * second before continuing.
6549 */
6550 if (!(gfp_mask & __GFP_FS))
6551 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
6552 allow_direct_reclaim(pgdat), HZ);
6553 else
6554 /* Throttle until kswapd wakes the process */
6555 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
6556 allow_direct_reclaim(pgdat));
6557
6558 if (fatal_signal_pending(current))
6559 return true;
6560
6561 out:
6562 return false;
6563 }
6564
6565 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
6566 gfp_t gfp_mask, nodemask_t *nodemask)
6567 {
6568 unsigned long nr_reclaimed;
6569 struct scan_control sc = {
6570 .nr_to_reclaim = SWAP_CLUSTER_MAX,
6571 .gfp_mask = current_gfp_context(gfp_mask),
6572 .reclaim_idx = gfp_zone(gfp_mask),
6573 .order = order,
6574 .nodemask = nodemask,
6575 .priority = DEF_PRIORITY,
6576 .may_writepage = 1,
6577 .may_unmap = 1,
6578 .may_swap = 1,
6579 };
6580
6581 /*
6582 * scan_control uses s8 fields for order, priority, and reclaim_idx.
6583 * Confirm they are large enough for max values.
6584 */
6585 BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX);
6586 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
6587 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
6588
6589 /*
6590 * Do not enter reclaim if fatal signal was delivered while throttled.
6591 * 1 is returned so that the page allocator does not OOM kill at this
6592 * point.
6593 */
6594 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
6595 return 1;
6596
6597 set_task_reclaim_state(current, &sc.reclaim_state);
6598 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
6599
6600 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6601
6602 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
6603 set_task_reclaim_state(current, NULL);
6604
6605 return nr_reclaimed;
6606 }
6607
6608 #ifdef CONFIG_MEMCG
6609
6610 /* Only used by soft limit reclaim. Do not reuse for anything else. */
6611 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
6612 gfp_t gfp_mask, bool noswap,
6613 pg_data_t *pgdat,
6614 unsigned long *nr_scanned)
6615 {
6616 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6617 struct scan_control sc = {
6618 .nr_to_reclaim = SWAP_CLUSTER_MAX,
6619 .target_mem_cgroup = memcg,
6620 .may_writepage = 1,
6621 .may_unmap = 1,
6622 .reclaim_idx = MAX_NR_ZONES - 1,
6623 .may_swap = !noswap,
6624 };
6625
6626 WARN_ON_ONCE(!current->reclaim_state);
6627
6628 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
6629 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
6630
6631 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
6632 sc.gfp_mask);
6633
6634 /*
6635 * NOTE: Although we can get the priority field, using it
6636 * here is not a good idea, since it limits the pages we can scan.
6637 * If we don't reclaim here, the shrink_node() call from balance_pgdat()
6638 * will pick up pages from other mem cgroups as well. We hack
6639 * the priority and make it zero.
6640 */
6641 shrink_lruvec(lruvec, &sc);
6642
6643 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
6644
6645 *nr_scanned = sc.nr_scanned;
6646
6647 return sc.nr_reclaimed;
6648 }
6649
6650 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6651 unsigned long nr_pages,
6652 gfp_t gfp_mask,
6653 unsigned int reclaim_options,
6654 int *swappiness)
6655 {
6656 unsigned long nr_reclaimed;
6657 unsigned int noreclaim_flag;
6658 struct scan_control sc = {
6659 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6660 .proactive_swappiness = swappiness,
6661 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
6662 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
6663 .reclaim_idx = MAX_NR_ZONES - 1,
6664 .target_mem_cgroup = memcg,
6665 .priority = DEF_PRIORITY,
6666 .may_writepage = 1,
6667 .may_unmap = 1,
6668 .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
6669 .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
6670 };
6671 /*
6672 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
6673 * equal pressure on all the nodes. This is based on the assumption that
6674 * the reclaim does not bail out early.
6675 */
6676 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6677
6678 set_task_reclaim_state(current, &sc.reclaim_state);
6679 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
6680 noreclaim_flag = memalloc_noreclaim_save();
6681
6682 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6683
6684 memalloc_noreclaim_restore(noreclaim_flag);
6685 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
6686 set_task_reclaim_state(current, NULL);
6687
6688 return nr_reclaimed;
6689 }
6690 #else
6691 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6692 unsigned long nr_pages,
6693 gfp_t gfp_mask,
6694 unsigned int reclaim_options,
6695 int *swappiness)
6696 {
6697 return 0;
6698 }
6699 #endif
6700
6701 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
6702 {
6703 struct mem_cgroup *memcg;
6704 struct lruvec *lruvec;
6705
6706 if (lru_gen_enabled()) {
6707 lru_gen_age_node(pgdat, sc);
6708 return;
6709 }
6710
6711 lruvec = mem_cgroup_lruvec(NULL, pgdat);
6712 if (!can_age_anon_pages(lruvec, sc))
6713 return;
6714
6715 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6716 return;
6717
6718 memcg = mem_cgroup_iter(NULL, NULL, NULL);
6719 do {
6720 lruvec = mem_cgroup_lruvec(memcg, pgdat);
6721 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
6722 sc, LRU_ACTIVE_ANON);
6723 memcg = mem_cgroup_iter(NULL, memcg, NULL);
6724 } while (memcg);
6725 }
6726
6727 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
6728 {
6729 int i;
6730 struct zone *zone;
6731
6732 /*
6733 * Check for watermark boosts top-down as the higher zones
6734 * are more likely to be boosted. Both watermarks and boosts
6735 * should not be checked at the same time as reclaim would
6736 * start prematurely when there is no boosting and a lower
6737 * zone is balanced.
6738 */
6739 for (i = highest_zoneidx; i >= 0; i--) {
6740 zone = pgdat->node_zones + i;
6741 if (!managed_zone(zone))
6742 continue;
6743
6744 if (zone->watermark_boost)
6745 return true;
6746 }
6747
6748 return false;
6749 }
6750
6751 /*
6752 * Returns true if there is an eligible zone balanced for the requested order
6753 * and highest_zoneidx
6754 */
6755 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
6756 {
6757 int i;
6758 unsigned long mark = -1;
6759 struct zone *zone;
6760
6761 /*
6762 * Check watermarks bottom-up as lower zones are more likely to
6763 * meet watermarks.
6764 */
6765 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
6766 enum zone_stat_item item;
6767 unsigned long free_pages;
6768
6769 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
6770 mark = promo_wmark_pages(zone);
6771 else
6772 mark = high_wmark_pages(zone);
6773
6774 /*
6775 * In defrag_mode, watermarks must be met in whole
6776 * blocks to avoid polluting allocator fallbacks.
6777 *
6778 * However, kswapd usually cannot accomplish this on
6779 * its own and needs kcompactd support. Once it's
6780 * reclaimed a compaction gap, and kswapd_shrink_node
6781 * has dropped order, simply ensure there are enough
6782 * base pages for compaction, wake kcompactd & sleep.
6783 */
6784 if (defrag_mode && order)
6785 item = NR_FREE_PAGES_BLOCKS;
6786 else
6787 item = NR_FREE_PAGES;
6788
6789 /*
6790 * When there is a high number of CPUs in the system,
6791 * the cumulative error from the vmstat per-cpu cache
6792 * can blur the line between the watermarks. In that
6793 * case, be safe and get an accurate snapshot.
6794 *
6795 * TODO: NR_FREE_PAGES_BLOCKS moves in steps of
6796 * pageblock_nr_pages, while the vmstat pcp threshold
6797 * is limited to 125. On many configurations that
6798 * counter won't actually be per-cpu cached. But keep
6799 * things simple for now; revisit when somebody cares.
6800 */
6801 free_pages = zone_page_state(zone, item);
6802 if (zone->percpu_drift_mark && free_pages < zone->percpu_drift_mark)
6803 free_pages = zone_page_state_snapshot(zone, item);
6804
6805 if (__zone_watermark_ok(zone, order, mark, highest_zoneidx,
6806 0, free_pages))
6807 return true;
6808 }
6809
6810 /*
6811 * If a node has no managed zone within highest_zoneidx, it does not
6812 * need balancing by definition. This can happen if a zone-restricted
6813 * allocation tries to wake a remote kswapd.
6814 */
6815 if (mark == -1)
6816 return true;
6817
6818 return false;
6819 }
6820
6821 /* Clear pgdat state for congested, dirty or under writeback. */
6822 static void clear_pgdat_congested(pg_data_t *pgdat)
6823 {
6824 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
6825
6826 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
6827 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
6828 clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
6829 }
6830
6831 /*
6832 * Prepare kswapd for sleeping. This verifies that there are no processes
6833 * waiting in throttle_direct_reclaim() and that watermarks have been met.
6834 *
6835 * Returns true if kswapd is ready to sleep
6836 */
6837 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
6838 int highest_zoneidx)
6839 {
6840 /*
6841 * The throttled processes are normally woken up in balance_pgdat() as
6842 * soon as allow_direct_reclaim() is true. But there is a potential
6843 * race between when kswapd checks the watermarks and a process gets
6844 * throttled. There is also a potential race if processes get
6845 * throttled, kswapd wakes, a large process exits thereby balancing the
6846 * zones, which causes kswapd to exit balance_pgdat() before reaching
6847 * the wake up checks. If kswapd is going to sleep, no process should
6848 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
6849 * the wake up is premature, processes will wake kswapd and get
6850 * throttled again. The difference from wake ups in balance_pgdat() is
6851 * that here we are under prepare_to_wait().
6852 */
6853 if (waitqueue_active(&pgdat->pfmemalloc_wait))
6854 wake_up_all(&pgdat->pfmemalloc_wait);
6855
6856 /* Hopeless node, leave it to direct reclaim */
6857 if (kswapd_test_hopeless(pgdat))
6858 return true;
6859
6860 if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
6861 clear_pgdat_congested(pgdat);
6862 return true;
6863 }
6864
6865 return false;
6866 }
6867
6868 /*
6869 * kswapd shrinks a node of pages that are at or below the highest usable
6870 * zone that is currently unbalanced.
6871 *
6872 * Returns true if kswapd scanned at least the requested number of pages to
6873 * reclaim or if the lack of progress was due to pages under writeback.
6874 * This is used to determine if the scanning priority needs to be raised.
6875 */
6876 static bool kswapd_shrink_node(pg_data_t *pgdat,
6877 struct scan_control *sc)
6878 {
6879 struct zone *zone;
6880 int z;
6881 unsigned long nr_reclaimed = sc->nr_reclaimed;
6882
6883 /* Reclaim a number of pages proportional to the number of zones */
6884 sc->nr_to_reclaim = 0;
6885 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
6886 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
6887 }
6888
6889 /*
6890 * Historically care was taken to put equal pressure on all zones but
6891 * now pressure is applied based on node LRU order.
6892 */
6893 shrink_node(pgdat, sc);
6894
6895 /*
6896 * Fragmentation may mean that the system cannot be rebalanced for
6897 * high-order allocations. If twice the allocation size has been
6898 * reclaimed then recheck watermarks only at order-0 to prevent
6899 * excessive reclaim. Assume that a process that requested a
6900 * high-order allocation can direct reclaim/compact.
6901 */
6902 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
6903 sc->order = 0;
6904
6905 /* account for progress from mm_account_reclaimed_pages() */
6906 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim;
6907 }
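/*
 * Editorial example (sketch only, not built): the per-pass target computed
 * above is the sum over eligible zones of max(high_wmark_pages(zone),
 * SWAP_CLUSTER_MAX), and the order is dropped to 0 once roughly twice the
 * requested allocation size has been reclaimed (compact_gap(order) is
 * 2UL << order in mm/internal.h at the time of writing). The zone sizes
 * below are invented for illustration.
 */
#if 0
static void kswapd_shrink_node_example(void)
{
	/* Hypothetical node: two managed zones with these high watermarks. */
	unsigned long high_wmarks[] = { 12288, 16 };
	unsigned long nr_to_reclaim = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(high_wmarks); i++)
		nr_to_reclaim += max(high_wmarks[i], (unsigned long)SWAP_CLUSTER_MAX);

	/* max(12288, 32) + max(16, 32) == 12288 + 32 == 12320 pages. */

	/*
	 * For an order-4 request, compact_gap(4) == 2UL << 4 == 32 pages:
	 * once at least 32 pages have been reclaimed, sc->order is reset
	 * to 0 and only order-0 watermarks are rechecked.
	 */
}
#endif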
6908
6909 /* Page allocator PCP high watermark is lowered if reclaim is active. */
6910 static inline void
6911 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
6912 {
6913 int i;
6914 struct zone *zone;
6915
6916 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
6917 if (active)
6918 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
6919 else
6920 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
6921 }
6922 }
6923
6924 static inline void
6925 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6926 {
6927 update_reclaim_active(pgdat, highest_zoneidx, true);
6928 }
6929
6930 static inline void
6931 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6932 {
6933 update_reclaim_active(pgdat, highest_zoneidx, false);
6934 }
6935
6936 /*
6937 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
6938 * that are eligible for use by the caller until at least one zone is
6939 * balanced.
6940 *
6941 * Returns the order kswapd finished reclaiming at.
6942 *
6943 * kswapd scans the zones in the highmem->normal->dma direction. It skips
6944 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
6945 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
6946 * or lower is eligible for reclaim until at least one usable zone is
6947 * balanced.
6948 */
6949 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
6950 {
6951 int i;
6952 unsigned long nr_soft_reclaimed;
6953 unsigned long nr_soft_scanned;
6954 unsigned long pflags;
6955 unsigned long nr_boost_reclaim;
6956 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
6957 bool boosted;
6958 struct zone *zone;
6959 struct scan_control sc = {
6960 .gfp_mask = GFP_KERNEL,
6961 .order = order,
6962 .may_unmap = 1,
6963 };
6964
6965 set_task_reclaim_state(current, &sc.reclaim_state);
6966 psi_memstall_enter(&pflags);
6967 __fs_reclaim_acquire(_THIS_IP_);
6968
6969 count_vm_event(PAGEOUTRUN);
6970
6971 /*
6972 * Account for the reclaim boost. Note that the zone boost is left in
6973 * place so that parallel allocations that are near the watermark will
6974 * stall or direct reclaim until kswapd is finished.
6975 */
6976 nr_boost_reclaim = 0;
6977 for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
6978 nr_boost_reclaim += zone->watermark_boost;
6979 zone_boosts[i] = zone->watermark_boost;
6980 }
6981 boosted = nr_boost_reclaim;
6982
6983 restart:
6984 set_reclaim_active(pgdat, highest_zoneidx);
6985 sc.priority = DEF_PRIORITY;
6986 do {
6987 unsigned long nr_reclaimed = sc.nr_reclaimed;
6988 bool raise_priority = true;
6989 bool balanced;
6990 bool ret;
6991 bool was_frozen;
6992
6993 sc.reclaim_idx = highest_zoneidx;
6994
6995 /*
6996 * If the number of buffer_heads exceeds the maximum allowed
6997 * then consider reclaiming from all zones. This has a dual
6998 * purpose -- on 64-bit systems it is expected that
6999 * buffer_heads are stripped during active rotation. On 32-bit
7000 * systems, highmem pages can pin lowmem memory and shrinking
7001 * buffers can relieve lowmem pressure. Reclaim may still not
7002 * go ahead if all eligible zones for the original allocation
7003 * request are balanced to avoid excessive reclaim from kswapd.
7004 */
7005 if (buffer_heads_over_limit) {
7006 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
7007 zone = pgdat->node_zones + i;
7008 if (!managed_zone(zone))
7009 continue;
7010
7011 sc.reclaim_idx = i;
7012 break;
7013 }
7014 }
7015
7016 /*
7017 * If the pgdat is imbalanced then ignore boosting and preserve
7018 * the watermarks for a later time and restart. Note that the
7019 * zone watermarks will still be reset at the end of balancing
7020 * on the grounds that the normal reclaim should be enough to
7021 * re-evaluate if boosting is required when kswapd next wakes.
7022 */
7023 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
7024 if (!balanced && nr_boost_reclaim) {
7025 nr_boost_reclaim = 0;
7026 goto restart;
7027 }
7028
7029 /*
7030 * If boosting is not active then only reclaim if there are no
7031 * eligible zones. Note that sc.reclaim_idx is not used as
7032 * buffer_heads_over_limit may have adjusted it.
7033 */
7034 if (!nr_boost_reclaim && balanced)
7035 goto out;
7036
7037 /* Limit the priority of boosting to avoid reclaim writeback */
7038 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
7039 raise_priority = false;
7040
7041 /*
7042 * Do not writeback or swap pages for boosted reclaim. The
7043 * intent is to relieve pressure not issue sub-optimal IO
7044 * from reclaim context. If no pages are reclaimed, the
7045 * reclaim will be aborted.
7046 */
7047 sc.may_writepage = !nr_boost_reclaim;
7048 sc.may_swap = !nr_boost_reclaim;
7049
7050 /*
7051 * Do some background aging, to give pages a chance to be
7052 * referenced before reclaiming. All pages are rotated
7053 * regardless of classzone as this is about consistent aging.
7054 */
7055 kswapd_age_node(pgdat, &sc);
7056
7057 /* Call soft limit reclaim before calling shrink_node. */
7058 sc.nr_scanned = 0;
7059 nr_soft_scanned = 0;
7060 nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order,
7061 sc.gfp_mask, &nr_soft_scanned);
7062 sc.nr_reclaimed += nr_soft_reclaimed;
7063
7064 /*
7065 * There should be no need to raise the scanning priority if
7066 * enough pages are already being scanned that the high
7067 * watermark would be met at 100% efficiency.
7068 */
7069 if (kswapd_shrink_node(pgdat, &sc))
7070 raise_priority = false;
7071
7072 /*
7073 * If the low watermark is met there is no need for processes
7074 * to be throttled on pfmemalloc_wait as they should now be
7075 * able to safely make forward progress. Wake them
7076 */
7077 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
7078 allow_direct_reclaim(pgdat))
7079 wake_up_all(&pgdat->pfmemalloc_wait);
7080
7081 /* Check if kswapd should be suspending */
7082 __fs_reclaim_release(_THIS_IP_);
7083 ret = kthread_freezable_should_stop(&was_frozen);
7084 __fs_reclaim_acquire(_THIS_IP_);
7085 if (was_frozen || ret)
7086 break;
7087
7088 /*
7089 * Raise priority if scanning rate is too low or there was no
7090 * progress in reclaiming pages
7091 */
7092 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
7093 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
7094
7095 /*
7096 * If reclaim made no progress for a boost, stop reclaim as
7097 * IO cannot be queued and it could be an infinite loop in
7098 * extreme circumstances.
7099 */
7100 if (nr_boost_reclaim && !nr_reclaimed)
7101 break;
7102
7103 if (raise_priority || !nr_reclaimed)
7104 sc.priority--;
7105 } while (sc.priority >= 1);
7106
7107 /*
7108 * Restart only if it went through the priority loop all the way,
7109 * but cache_trim_mode didn't work.
7110 */
7111 if (!sc.nr_reclaimed && sc.priority < 1 &&
7112 !sc.no_cache_trim_mode && sc.cache_trim_mode_failed) {
7113 sc.no_cache_trim_mode = 1;
7114 goto restart;
7115 }
7116
7117 /*
7118 * If the reclaim was boosted, we might still be far from the
7119 * high watermark at this point. We need to avoid increasing the
7120 * failure count to prevent the kswapd thread from stopping.
7121 */
7122 if (!sc.nr_reclaimed && !boosted) {
7123 int fail_cnt = atomic_inc_return(&pgdat->kswapd_failures);
7124 /* kswapd context, low overhead to trace every failure */
7125 trace_mm_vmscan_kswapd_reclaim_fail(pgdat->node_id, fail_cnt);
7126 }
7127
7128 out:
7129 clear_reclaim_active(pgdat, highest_zoneidx);
7130
7131 /* If reclaim was boosted, account for the reclaim done in this pass */
7132 if (boosted) {
7133 unsigned long flags;
7134
7135 for (i = 0; i <= highest_zoneidx; i++) {
7136 if (!zone_boosts[i])
7137 continue;
7138
7139 /* Increments are under the zone lock */
7140 zone = pgdat->node_zones + i;
7141 spin_lock_irqsave(&zone->lock, flags);
7142 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
7143 spin_unlock_irqrestore(&zone->lock, flags);
7144 }
7145
7146 /*
7147 * As there is now likely space, wake up kcompactd to defragment
7148 * pageblocks.
7149 */
7150 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
7151 }
7152
7153 snapshot_refaults(NULL, pgdat);
7154 __fs_reclaim_release(_THIS_IP_);
7155 psi_memstall_leave(&pflags);
7156 set_task_reclaim_state(current, NULL);
7157
7158 /*
7159 * Return the order kswapd stopped reclaiming at as
7160 * prepare_kswapd_sleep() takes it into account. If another caller
7161 * entered the allocator slow path while kswapd was awake, order will
7162 * remain at the higher level.
7163 */
7164 return sc.order;
7165 }
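/*
 * Editorial worked example for the boost handling above (invented numbers):
 * if two boosted zones carry watermark_boost values of 1024 and 512 pages,
 * nr_boost_reclaim starts at 1536 and is reduced by each pass's progress.
 * While boosting, the scan priority is never raised past DEF_PRIORITY - 2
 * and neither writeback nor swap is used; once reclaim finishes, the boosts
 * recorded in zone_boosts[] are subtracted back under the zone lock and
 * kcompactd is woken at pageblock_order.
 */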
7166
7167 /*
7168 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7169 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES, which is
7170 * not a valid index, then either kswapd is running for the first time or it
7171 * could not sleep after the previous reclaim attempt (the node is still unbalanced).
7172 * In that case, return the zone index of the previous kswapd reclaim cycle.
7173 */
7174 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
7175 enum zone_type prev_highest_zoneidx)
7176 {
7177 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7178
7179 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx;
7180 }
7181
7182 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
7183 unsigned int highest_zoneidx)
7184 {
7185 long remaining = 0;
7186 DEFINE_WAIT(wait);
7187
7188 if (freezing(current) || kthread_should_stop())
7189 return;
7190
7191 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7192
7193 /*
7194 * Try to sleep for a short interval. Note that kcompactd will only be
7195 * woken if it is possible to sleep for a short interval. This is
7196 * deliberate on the assumption that if reclaim cannot keep an
7197 * eligible zone balanced, it is also unlikely that compaction will
7198 * succeed.
7199 */
7200 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7201 /*
7202 * Compaction records what page blocks it recently failed to
7203 * isolate pages from and skips them in the future scanning.
7204 * When kswapd is going to sleep, it is reasonable to assume
7205 * that page isolation and compaction may succeed, so reset the cache.
7206 */
7207 reset_isolation_suitable(pgdat);
7208
7209 /*
7210 * We have freed the memory, now we should compact it to make
7211 * allocation of the requested order possible.
7212 */
7213 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
7214
7215 remaining = schedule_timeout(HZ/10);
7216
7217 /*
7218 * If woken prematurely then reset kswapd_highest_zoneidx and
7219 * order. The values will either be from a wakeup request or
7220 * the previous request that slept prematurely.
7221 */
7222 if (remaining) {
7223 WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
7224 kswapd_highest_zoneidx(pgdat,
7225 highest_zoneidx));
7226
7227 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
7228 WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
7229 }
7230
7231 finish_wait(&pgdat->kswapd_wait, &wait);
7232 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7233 }
7234
7235 /*
7236 * After a short sleep, check if it was a premature sleep. If not, then
7237 * go fully to sleep until explicitly woken up.
7238 */
7239 if (!remaining &&
7240 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7241 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
7242
7243 /*
7244 * vmstat counters are not perfectly accurate and the estimated
7245 * value for counters such as NR_FREE_PAGES can deviate from the
7246 * true value by nr_online_cpus * threshold. To avoid the zone
7247 * watermarks being breached while under pressure, we reduce the
7248 * per-cpu vmstat threshold while kswapd is awake and restore
7249 * them before going back to sleep.
7250 */
7251 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
7252
7253 if (!kthread_should_stop())
7254 schedule();
7255
7256 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
7257 } else {
7258 if (remaining)
7259 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
7260 else
7261 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
7262 }
7263 finish_wait(&pgdat->kswapd_wait, &wait);
7264 }
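/*
 * Worked example for the vmstat note above (illustrative numbers only):
 * with 64 online CPUs and a per-cpu stat threshold of 125, a counter such
 * as NR_FREE_PAGES can lag its true value by up to 64 * 125 = 8000 pages,
 * roughly 31 MiB with 4 KiB pages. That is why the tighter
 * calculate_pressure_threshold() is in effect while kswapd is awake and
 * the normal threshold is restored only for the duration of the sleep.
 */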
7265
7266 /*
7267 * The background pageout daemon, started as a kernel thread
7268 * from the init process.
7269 *
7270 * This basically trickles out pages so that we have _some_
7271 * free memory available even if there is no other activity
7272 * that frees anything up. This is needed for things like routing
7273 * etc, where we otherwise might have all activity going on in
7274 * asynchronous contexts that cannot page things out.
7275 *
7276 * If there are applications that are active memory-allocators
7277 * (most normal use), this basically shouldn't matter.
7278 */
7279 static int kswapd(void *p)
7280 {
7281 unsigned int alloc_order, reclaim_order;
7282 unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
7283 pg_data_t *pgdat = (pg_data_t *)p;
7284 struct task_struct *tsk = current;
7285
7286 /*
7287 * Tell the memory management that we're a "memory allocator",
7288 * and that if we need more memory we should get access to it
7289 * regardless (see "__alloc_pages()"). "kswapd" should
7290 * never get caught in the normal page freeing logic.
7291 *
7292 * (Kswapd normally doesn't need memory anyway, but sometimes
7293 * you need a small amount of memory in order to be able to
7294 * page out something else, and this flag essentially protects
7295 * us from recursively trying to free more memory as we're
7296 * trying to free the first piece of memory in the first place).
7297 */
7298 tsk->flags |= PF_MEMALLOC | PF_KSWAPD;
7299 set_freezable();
7300
7301 WRITE_ONCE(pgdat->kswapd_order, 0);
7302 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7303 atomic_set(&pgdat->nr_writeback_throttled, 0);
7304 for ( ; ; ) {
7305 bool was_frozen;
7306
7307 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
7308 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7309 highest_zoneidx);
7310
7311 kswapd_try_sleep:
7312 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
7313 highest_zoneidx);
7314
7315 /* Read the new order and highest_zoneidx */
7316 alloc_order = READ_ONCE(pgdat->kswapd_order);
7317 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7318 highest_zoneidx);
7319 WRITE_ONCE(pgdat->kswapd_order, 0);
7320 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7321
7322 if (kthread_freezable_should_stop(&was_frozen))
7323 break;
7324
7325 /*
7326 * We can speed up thawing tasks if we don't call balance_pgdat
7327 * after returning from the refrigerator
7328 */
7329 if (was_frozen)
7330 continue;
7331
7332 /*
7333 * Reclaim begins at the requested order but if a high-order
7334 * reclaim fails then kswapd falls back to reclaiming for
7335 * order-0. If that happens, kswapd will consider sleeping
7336 * for the order it finished reclaiming at (reclaim_order)
7337 * but kcompactd is woken to compact for the original
7338 * request (alloc_order).
7339 */
7340 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
7341 alloc_order);
7342 reclaim_order = balance_pgdat(pgdat, alloc_order,
7343 highest_zoneidx);
7344 if (reclaim_order < alloc_order)
7345 goto kswapd_try_sleep;
7346 }
7347
7348 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
7349
7350 return 0;
7351 }
7352
7353 /*
7354 * A zone is low on free memory or too fragmented for high-order memory. If
7355 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
7356 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
7357 * has failed or is not needed, still wake up kcompactd if only compaction is
7358 * needed.
7359 */
7360 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
7361 enum zone_type highest_zoneidx)
7362 {
7363 pg_data_t *pgdat;
7364 enum zone_type curr_idx;
7365
7366 if (!managed_zone(zone))
7367 return;
7368
7369 if (!cpuset_zone_allowed(zone, gfp_flags))
7370 return;
7371
7372 pgdat = zone->zone_pgdat;
7373 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7374
7375 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
7376 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
7377
7378 if (READ_ONCE(pgdat->kswapd_order) < order)
7379 WRITE_ONCE(pgdat->kswapd_order, order);
7380
7381 if (!waitqueue_active(&pgdat->kswapd_wait))
7382 return;
7383
7384 /* Hopeless node, leave it to direct reclaim if possible */
7385 if (kswapd_test_hopeless(pgdat) ||
7386 (pgdat_balanced(pgdat, order, highest_zoneidx) &&
7387 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
7388 /*
7389 * There may be plenty of free memory available, but it's too
7390 * fragmented for high-order allocations. Wake up kcompactd
7391 * and rely on compaction_suitable() to determine if it's
7392 * needed. If it fails, it will defer subsequent attempts to
7393 * ratelimit its work.
7394 */
7395 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
7396 wakeup_kcompactd(pgdat, order, highest_zoneidx);
7397 return;
7398 }
7399
7400 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
7401 gfp_flags);
7402 wake_up_interruptible(&pgdat->kswapd_wait);
7403 }
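/*
 * Editorial sketch (not built): how an allocation slow path might wake
 * kswapd for every zone eligible for a request, modelled loosely on
 * wake_all_kswapds() in mm/page_alloc.c (which additionally skips pgdats
 * it has already woken). The function and parameter names below are
 * hypothetical.
 */
#if 0
static void wake_kswapds_example(gfp_t gfp_mask, unsigned int order,
				 struct zonelist *zonelist, nodemask_t *nodemask)
{
	enum zone_type highest_zoneidx = gfp_zone(gfp_mask);
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx,
					nodemask)
		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
}
#endif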
7404
7405 void kswapd_clear_hopeless(pg_data_t *pgdat, enum kswapd_clear_hopeless_reason reason)
7406 {
7407 /* Only trace actual resets, not redundant zero-to-zero */
7408 if (atomic_xchg(&pgdat->kswapd_failures, 0))
7409 trace_mm_vmscan_kswapd_clear_hopeless(pgdat->node_id, reason);
7410 }
7411
7412 /*
7413 * Reset kswapd_failures only when the node is balanced. Without this
7414 * check, successful direct reclaim (e.g., from cgroup memory.high
7415 * throttling) can keep resetting kswapd_failures even when the node
7416 * cannot be balanced, causing kswapd to run endlessly.
7417 */
7418 void kswapd_try_clear_hopeless(struct pglist_data *pgdat,
7419 unsigned int order, int highest_zoneidx)
7420 {
7421 if (pgdat_balanced(pgdat, order, highest_zoneidx))
7422 kswapd_clear_hopeless(pgdat, current_is_kswapd() ?
7423 KSWAPD_CLEAR_HOPELESS_KSWAPD : KSWAPD_CLEAR_HOPELESS_DIRECT);
7424 }
7425
7426 bool kswapd_test_hopeless(pg_data_t *pgdat)
7427 {
7428 return atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES;
7429 }
7430
7431 #ifdef CONFIG_HIBERNATION
7432 /*
7433 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7434 * freed pages.
7435 *
7436 * Rather than trying to age LRUs the aim is to preserve the overall
7437 * LRU order by reclaiming preferentially
7438 * inactive > active > active referenced > active mapped
7439 */
7440 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
7441 {
7442 struct scan_control sc = {
7443 .nr_to_reclaim = nr_to_reclaim,
7444 .gfp_mask = GFP_HIGHUSER_MOVABLE,
7445 .reclaim_idx = MAX_NR_ZONES - 1,
7446 .priority = DEF_PRIORITY,
7447 .may_writepage = 1,
7448 .may_unmap = 1,
7449 .may_swap = 1,
7450 .hibernation_mode = 1,
7451 };
7452 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
7453 unsigned long nr_reclaimed;
7454 unsigned int noreclaim_flag;
7455
7456 fs_reclaim_acquire(sc.gfp_mask);
7457 noreclaim_flag = memalloc_noreclaim_save();
7458 set_task_reclaim_state(current, &sc.reclaim_state);
7459
7460 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
7461
7462 set_task_reclaim_state(current, NULL);
7463 memalloc_noreclaim_restore(noreclaim_flag);
7464 fs_reclaim_release(sc.gfp_mask);
7465
7466 return nr_reclaimed;
7467 }
7468 #endif /* CONFIG_HIBERNATION */
7469
7470 /*
7471 * This kswapd start function will be called by init and node-hot-add.
7472 */
7473 void __meminit kswapd_run(int nid)
7474 {
7475 pg_data_t *pgdat = NODE_DATA(nid);
7476
7477 pgdat_kswapd_lock(pgdat);
7478 if (!pgdat->kswapd) {
7479 pgdat->kswapd = kthread_create_on_node(kswapd, pgdat, nid, "kswapd%d", nid);
7480 if (IS_ERR(pgdat->kswapd)) {
7481 /* failure at boot is fatal */
7482 pr_err("Failed to start kswapd on node %d, ret=%pe\n",
7483 nid, pgdat->kswapd);
7484 BUG_ON(system_state < SYSTEM_RUNNING);
7485 pgdat->kswapd = NULL;
7486 } else {
7487 wake_up_process(pgdat->kswapd);
7488 }
7489 }
7490 pgdat_kswapd_unlock(pgdat);
7491 }
7492
7493 /*
7494 * Called by memory hotplug when all memory in a node is offlined. Caller must
7495 * be holding mem_hotplug_begin/done().
7496 */
7497 void __meminit kswapd_stop(int nid)
7498 {
7499 pg_data_t *pgdat = NODE_DATA(nid);
7500 struct task_struct *kswapd;
7501
7502 pgdat_kswapd_lock(pgdat);
7503 kswapd = pgdat->kswapd;
7504 if (kswapd) {
7505 kthread_stop(kswapd);
7506 pgdat->kswapd = NULL;
7507 }
7508 pgdat_kswapd_unlock(pgdat);
7509 }
7510
7511 static const struct ctl_table vmscan_sysctl_table[] = {
7512 {
7513 .procname = "swappiness",
7514 .data = &vm_swappiness,
7515 .maxlen = sizeof(vm_swappiness),
7516 .mode = 0644,
7517 .proc_handler = proc_dointvec_minmax,
7518 .extra1 = SYSCTL_ZERO,
7519 .extra2 = SYSCTL_TWO_HUNDRED,
7520 },
7521 #ifdef CONFIG_NUMA
7522 {
7523 .procname = "zone_reclaim_mode",
7524 .data = &node_reclaim_mode,
7525 .maxlen = sizeof(node_reclaim_mode),
7526 .mode = 0644,
7527 .proc_handler = proc_dointvec_minmax,
7528 .extra1 = SYSCTL_ZERO,
7529 }
7530 #endif
7531 };
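/*
 * Editorial example (user-space sketch, not part of this file): the
 * "swappiness" entry registered above is visible as /proc/sys/vm/swappiness
 * and accepts values 0..200. A minimal reader/writer could look like the
 * following; error handling and privileges (writing requires root) are
 * glossed over.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/swappiness", "r+");
	int val;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &val) == 1)
		printf("vm.swappiness = %d\n", val);
	rewind(f);
	fprintf(f, "%d\n", 60);		/* e.g. restore the default of 60 */
	fclose(f);
	return 0;
}
#endif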
7532
7533 static int __init kswapd_init(void)
7534 {
7535 int nid;
7536
7537 swap_setup();
7538 for_each_node_state(nid, N_MEMORY)
7539 kswapd_run(nid);
7540 register_sysctl_init("vm", vmscan_sysctl_table);
7541 return 0;
7542 }
7543
7544 module_init(kswapd_init)
7545
7546 #ifdef CONFIG_NUMA
7547 /*
7548 * Node reclaim mode
7549 *
7550 * If non-zero call node_reclaim when the number of free pages falls below
7551 * the watermarks.
7552 */
7553 int node_reclaim_mode __read_mostly;
7554
7555 /*
7556 * Priority for NODE_RECLAIM. This determines the fraction of pages
7557 * of a node considered for each node_reclaim pass. A priority of 4
7558 * scans 1/16th of the node.
7559 */
7560 #define NODE_RECLAIM_PRIORITY 4
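/*
 * Editorial note: scan_control::priority is applied as a right shift of the
 * LRU size per pass, so a starting priority of 4 considers lru_size >> 4,
 * i.e. 1/16th of the node's pages, before __node_reclaim() lowers the
 * priority further if the target has not been met.
 */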
7561
7562 /*
7563 * Percentage of pages in a zone that must be unmapped for node_reclaim to
7564 * occur.
7565 */
7566 int sysctl_min_unmapped_ratio = 1;
7567
7568 /*
7569 * If the number of slab pages in a zone grows beyond this percentage then
7570 * slab reclaim needs to occur.
7571 */
7572 int sysctl_min_slab_ratio = 5;
7573
7574 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
7575 {
7576 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
7577 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
7578 node_page_state(pgdat, NR_ACTIVE_FILE);
7579
7580 /*
7581 * It's possible for there to be more file mapped pages than
7582 * accounted for by the pages on the file LRU lists because
7583 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
7584 */
7585 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
7586 }
7587
7588 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
7589 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
7590 {
7591 unsigned long nr_pagecache_reclaimable;
7592 unsigned long delta = 0;
7593
7594 /*
7595 * If RECLAIM_UNMAP is set, then all file pages are considered
7596 * potentially reclaimable. Otherwise, we have to worry about
7597 * pages like swapcache, and node_unmapped_file_pages() provides
7598 * a better estimate
7599 */
7600 if (node_reclaim_mode & RECLAIM_UNMAP)
7601 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
7602 else
7603 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
7604
7605 /*
7606 * Since we can't clean folios through reclaim, remove dirty file
7607 * folios from consideration.
7608 */
7609 delta += node_page_state(pgdat, NR_FILE_DIRTY);
7610
7611 /* Watch for any possible underflows due to delta */
7612 if (unlikely(delta > nr_pagecache_reclaimable))
7613 delta = nr_pagecache_reclaimable;
7614
7615 return nr_pagecache_reclaimable - delta;
7616 }
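/*
 * Worked example for the estimate above (invented numbers): with
 * RECLAIM_UNMAP clear, 100000 file LRU pages of which 30000 are mapped and
 * 5000 dirty, the reclaimable estimate is (100000 - 30000) - 5000 = 65000
 * pages. The clamp on delta only matters when dirty pages would otherwise
 * push the result negative.
 */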
7617
7618 /*
7619 * Try to free up some pages from this node through reclaim.
7620 */
7621 static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
7622 unsigned long nr_pages,
7623 struct scan_control *sc)
7624 {
7625 struct task_struct *p = current;
7626 unsigned int noreclaim_flag;
7627 unsigned long pflags;
7628
7629 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, sc->order,
7630 sc->gfp_mask);
7631
7632 cond_resched();
7633 psi_memstall_enter(&pflags);
7634 delayacct_freepages_start();
7635 fs_reclaim_acquire(sc->gfp_mask);
7636 /*
7637 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
7638 */
7639 noreclaim_flag = memalloc_noreclaim_save();
7640 set_task_reclaim_state(p, &sc->reclaim_state);
7641
7642 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
7643 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
7644 /*
7645 * Free memory by calling shrink_node() with increasing
7646 * priorities until we have enough memory freed.
7647 */
7648 do {
7649 shrink_node(pgdat, sc);
7650 } while (sc->nr_reclaimed < nr_pages && --sc->priority >= 0);
7651 }
7652
7653 set_task_reclaim_state(p, NULL);
7654 memalloc_noreclaim_restore(noreclaim_flag);
7655 fs_reclaim_release(sc->gfp_mask);
7656 delayacct_freepages_end();
7657 psi_memstall_leave(&pflags);
7658
7659 trace_mm_vmscan_node_reclaim_end(sc->nr_reclaimed);
7660
7661 return sc->nr_reclaimed;
7662 }
7663
7664 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7665 {
7666 int ret;
7667 /* Minimum pages needed in order to stay on node */
7668 const unsigned long nr_pages = 1 << order;
7669 struct scan_control sc = {
7670 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
7671 .gfp_mask = current_gfp_context(gfp_mask),
7672 .order = order,
7673 .priority = NODE_RECLAIM_PRIORITY,
7674 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
7675 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
7676 .may_swap = 1,
7677 .reclaim_idx = gfp_zone(gfp_mask),
7678 };
7679
7680 /*
7681 * Node reclaim reclaims unmapped file backed pages and
7682 * slab pages if we are over the defined limits.
7683 *
7684 * A small portion of unmapped file backed pages is needed for
7685 * file I/O; otherwise pages read by file I/O will be immediately
7686 * thrown out if the node is overallocated. So we do not reclaim
7687 * if less than a specified percentage of the node is used by
7688 * unmapped file backed pages.
7689 */
7690 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
7691 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
7692 pgdat->min_slab_pages)
7693 return NODE_RECLAIM_FULL;
7694
7695 /*
7696 * Do not scan if the allocation should not be delayed.
7697 */
7698 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
7699 return NODE_RECLAIM_NOSCAN;
7700
7701 /*
7702 * Only run node reclaim on the local node or on nodes that do not
7703 * have associated processors. This will favor the local processor
7704 * over remote processors and spread off node memory allocations
7705 * as wide as possible.
7706 */
7707 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
7708 return NODE_RECLAIM_NOSCAN;
7709
7710 if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
7711 return NODE_RECLAIM_NOSCAN;
7712
7713 ret = __node_reclaim(pgdat, gfp_mask, nr_pages, &sc) >= nr_pages;
7714 clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
7715
7716 if (ret)
7717 count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
7718 else
7719 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
7720
7721 return ret;
7722 }
7723
7724 #else
7725
7726 static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
7727 unsigned long nr_pages,
7728 struct scan_control *sc)
7729 {
7730 return 0;
7731 }
7732
7733 #endif
7734
7735 enum {
7736 MEMORY_RECLAIM_SWAPPINESS = 0,
7737 MEMORY_RECLAIM_SWAPPINESS_MAX,
7738 MEMORY_RECLAIM_NULL,
7739 };
7740 static const match_table_t tokens = {
7741 { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
7742 { MEMORY_RECLAIM_SWAPPINESS_MAX, "swappiness=max"},
7743 { MEMORY_RECLAIM_NULL, NULL },
7744 };
7745
7746 int user_proactive_reclaim(char *buf,
7747 struct mem_cgroup *memcg, pg_data_t *pgdat)
7748 {
7749 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
7750 unsigned long nr_to_reclaim, nr_reclaimed = 0;
7751 int swappiness = -1;
7752 char *old_buf, *start;
7753 substring_t args[MAX_OPT_ARGS];
7754 gfp_t gfp_mask = GFP_KERNEL;
7755
7756 if (!buf || (!memcg && !pgdat) || (memcg && pgdat))
7757 return -EINVAL;
7758
7759 buf = strstrip(buf);
7760
7761 old_buf = buf;
7762 nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
7763 if (buf == old_buf)
7764 return -EINVAL;
7765
7766 buf = strstrip(buf);
7767
7768 while ((start = strsep(&buf, " ")) != NULL) {
7769 if (!strlen(start))
7770 continue;
7771 switch (match_token(start, tokens, args)) {
7772 case MEMORY_RECLAIM_SWAPPINESS:
7773 if (match_int(&args[0], &swappiness))
7774 return -EINVAL;
7775 if (swappiness < MIN_SWAPPINESS ||
7776 swappiness > MAX_SWAPPINESS)
7777 return -EINVAL;
7778 break;
7779 case MEMORY_RECLAIM_SWAPPINESS_MAX:
7780 swappiness = SWAPPINESS_ANON_ONLY;
7781 break;
7782 default:
7783 return -EINVAL;
7784 }
7785 }
7786
7787 while (nr_reclaimed < nr_to_reclaim) {
7788 /* Will converge on zero, but reclaim enforces a minimum */
7789 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
7790 unsigned long reclaimed;
7791
7792 if (signal_pending(current))
7793 return -EINTR;
7794
7795 /*
7796 * On the final attempt, drain the per-CPU LRU caches in the
7797 * hope of introducing more evictable pages.
7798 */
7799 if (!nr_retries)
7800 lru_add_drain_all();
7801
7802 if (memcg) {
7803 unsigned int reclaim_options;
7804
7805 reclaim_options = MEMCG_RECLAIM_MAY_SWAP |
7806 MEMCG_RECLAIM_PROACTIVE;
7807 reclaimed = try_to_free_mem_cgroup_pages(memcg,
7808 batch_size, gfp_mask,
7809 reclaim_options,
7810 swappiness == -1 ? NULL : &swappiness);
7811 } else {
7812 struct scan_control sc = {
7813 .gfp_mask = current_gfp_context(gfp_mask),
7814 .reclaim_idx = gfp_zone(gfp_mask),
7815 .proactive_swappiness = swappiness == -1 ? NULL : &swappiness,
7816 .priority = DEF_PRIORITY,
7817 .may_writepage = 1,
7818 .nr_to_reclaim = max(batch_size, SWAP_CLUSTER_MAX),
7819 .may_unmap = 1,
7820 .may_swap = 1,
7821 .proactive = 1,
7822 };
7823
7824 if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED,
7825 &pgdat->flags))
7826 return -EBUSY;
7827
7828 reclaimed = __node_reclaim(pgdat, gfp_mask,
7829 batch_size, &sc);
7830 clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
7831 }
7832
7833 if (!reclaimed && !nr_retries--)
7834 return -EAGAIN;
7835
7836 nr_reclaimed += reclaimed;
7837 }
7838
7839 return 0;
7840 }
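/*
 * Editorial example: the buffer parsed above is a byte count followed by an
 * optional swappiness argument, e.g. "512M swappiness=10" or
 * "1G swappiness=max". A 1G request becomes 262144 pages (with 4 KiB pages)
 * and is worked off in shrinking batches of (remaining / 4), with the
 * reclaim paths themselves enforcing a floor of SWAP_CLUSTER_MAX pages per
 * attempt.
 */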
7841
7842 /**
7843 * check_move_unevictable_folios - Move evictable folios to appropriate zone
7844 * lru list
7845 * @fbatch: Batch of lru folios to check.
7846 *
7847 * Checks folios for evictability, if an evictable folio is in the unevictable
7848 * lru list, moves it to the appropriate evictable lru list. This function
7849 * should only be used for LRU folios.
7850 */
7851 void check_move_unevictable_folios(struct folio_batch *fbatch)
7852 {
7853 struct lruvec *lruvec = NULL;
7854 int pgscanned = 0;
7855 int pgrescued = 0;
7856 int i;
7857
7858 for (i = 0; i < fbatch->nr; i++) {
7859 struct folio *folio = fbatch->folios[i];
7860 int nr_pages = folio_nr_pages(folio);
7861
7862 pgscanned += nr_pages;
7863
7864 /* block memcg migration while the folio moves between lrus */
7865 if (!folio_test_clear_lru(folio))
7866 continue;
7867
7868 lruvec = folio_lruvec_relock_irq(folio, lruvec);
7869 if (folio_evictable(folio) && folio_test_unevictable(folio)) {
7870 lruvec_del_folio(lruvec, folio);
7871 folio_clear_unevictable(folio);
7872 lruvec_add_folio(lruvec, folio);
7873 pgrescued += nr_pages;
7874 }
7875 folio_set_lru(folio);
7876 }
7877
7878 if (lruvec) {
7879 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
7880 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7881 unlock_page_lruvec_irq(lruvec);
7882 } else if (pgscanned) {
7883 count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7884 }
7885 }
7886 EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
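/*
 * Editorial sketch (not built): how a filesystem might rescue folios it has
 * just made evictable again, modelled loosely on shmem_unlock_mapping().
 * The function name is hypothetical.
 */
#if 0
static void rescue_mapping_example(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
#endif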
7887
7888 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
7889 static ssize_t reclaim_store(struct device *dev,
7890 struct device_attribute *attr,
7891 const char *buf, size_t count)
7892 {
7893 int ret, nid = dev->id;
7894
7895 ret = user_proactive_reclaim((char *)buf, NULL, NODE_DATA(nid));
7896 return ret ? -EAGAIN : count;
7897 }
7898
7899 static DEVICE_ATTR_WO(reclaim);
7900 int reclaim_register_node(struct node *node)
7901 {
7902 return device_create_file(&node->dev, &dev_attr_reclaim);
7903 }
7904
7905 void reclaim_unregister_node(struct node *node)
7906 {
7907 return device_remove_file(&node->dev, &dev_attr_reclaim);
7908 }
7909 #endif
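/*
 * Editorial example (user-space sketch): the write-only "reclaim" attribute
 * registered above is typically exposed per node as
 * /sys/devices/system/node/nodeN/reclaim and takes the format parsed by
 * user_proactive_reclaim(). Writing "512M swappiness=10" requests 512 MiB
 * of proactive reclaim from that node; reclaim_store() above maps any
 * failure to -EAGAIN. Error handling is minimal.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char req[] = "512M swappiness=10";
	int fd = open("/sys/devices/system/node/node0/reclaim", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, req, strlen(req)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif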
7910