// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "internal.h"

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more frequently than the thrashing
 * set, but hopefully are used less:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * If we have swap, we should also consider NR_inactive_anon and
 * NR_active_anon, so for page cache and anonymous pages respectively:
 *
 *   NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
 *   + NR_inactive_anon + NR_active_anon
 *
 *   NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
 *   + NR_inactive_file + NR_active_file
 *
 * Which can be further simplified to:
 *
 *   (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
 *
 *   (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
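 *
 * As an illustrative example with hypothetical numbers: suppose
 * NR_inactive_file is 1000 slots and a file page is evicted at
 * E = 5000.  If it refaults at R = 5300, the refault distance is
 * R - E = 300 and the complete minimum access distance is
 * 1000 + 300 = 1300.  With NR_active_file = 400 and no swap, the
 * test (R - E) <= NR_active_file becomes 300 <= 400, so the
 * refaulting page is activated.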
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) pages in the userspace workingset, the refaulting page
 * is activated optimistically in the hope that (R - E) pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current workingset.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;
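
/*
 * Illustrative bit budget, not a statement about any particular
 * config: on 64-bit, BITS_PER_XA_VALUE is 63, so the xarray tag
 * costs one bit; assuming MEM_CGROUP_ID_SHIFT == 16 and
 * NODES_SHIFT == 6, EVICTION_SHIFT is 1 + 1 + 6 + 16 = 24,
 * leaving 40 bits of eviction timestamp in the shadow entry.
 */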

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry;
	*workingsetp = workingset;
}
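
/*
 * Round-trip sketch under the widths assumed above (NODES_SHIFT of 6,
 * MEM_CGROUP_ID_SHIFT of 16; the values are hypothetical):
 *
 *	pack_shadow(memcgid = 3, node 2, eviction = 0x1234, workingset = true)
 *
 * builds, from high bits to low:
 *
 *	[ eviction 0x1234 ][ memcgid 0x0003 ][ nid 000010 ][ w 1 ]
 *
 * and tags the result as an xarray value entry.  unpack_shadow() peels
 * the fields off in reverse order, starting with the workingset bit.
 */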

#ifdef CONFIG_LRU_GEN

static void *lru_gen_eviction(struct folio *folio)
{
	int hist;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);
	int refs = folio_lru_refs(folio);
	int tier = lru_tier_from_refs(refs);
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;
	min_seq = READ_ONCE(lrugen->min_seq[type]);
	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);

	hist = lru_hist_from_seq(min_seq);
	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);

	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
}
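
/*
 * Token sketch, assuming LRU_REFS_WIDTH == 2 purely for illustration:
 * with min_seq == 5 and refs == 3, the token is (5 << 2) | 2 == 22.
 * The low bits carry the access tier, and the high bits record the
 * oldest generation number at eviction time, which
 * lru_gen_test_recent() later compares against the then-current
 * min_seq.
 */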

/*
 * Tests if the shadow entry is for a folio that was recently evicted.
 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
 */
static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	int memcg_id;
	unsigned long min_seq;
	struct mem_cgroup *memcg;
	struct pglist_data *pgdat;

	unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);

	memcg = mem_cgroup_from_id(memcg_id);
	*lruvec = mem_cgroup_lruvec(memcg, pgdat);

	min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]);
	return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
	bool recent;
	int hist, tier, refs;
	bool workingset;
	unsigned long token;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);

	rcu_read_lock();

	recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
	if (lruvec != folio_lruvec(folio))
		goto unlock;

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

	if (!recent)
		goto unlock;

	lrugen = &lruvec->lrugen;

	hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type]));
	/* see the comment in folio_lru_refs() */
	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
	tier = lru_tier_from_refs(refs);

	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);

	/*
	 * Count the following two cases as stalls:
	 * 1. For pages accessed through page tables, hotter pages pushed out
	 *    hot pages which refaulted immediately.
	 * 2. For pages accessed multiple times through file descriptors,
	 *    they would have been protected by sort_folio().
	 */
	if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
		set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
	}
unlock:
	rcu_read_unlock();
}

#else /* !CONFIG_LRU_GEN */

static void *lru_gen_eviction(struct folio *folio)
{
	return NULL;
}

static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
				unsigned long *token, bool *workingset)
{
	return false;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
}

#endif /* CONFIG_LRU_GEN */

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}
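
/*
 * For example (hypothetical hierarchy): evicting a 16-page folio from
 * leaf cgroup C in root -> A -> C bumps nonresident_age by 16 in C,
 * in A and in the root lruvec alike, so refault distances measured at
 * any level of the hierarchy stay comparable to that level's own LRU
 * size.
 */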

/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled())
		return lru_gen_eviction(folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	eviction >>= bucket_order;
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
				folio_test_workingset(folio));
}
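
/*
 * Bucketing sketch: with bucket_order == 2 (an assumed value; the
 * real one is computed at boot in workingset_init()), nonresident_age
 * readings 40..43 are all stored as eviction timestamp 10.  The
 * refault side shifts the timestamp back up by bucket_order, so
 * refault distances remain accurate to within one bucket of
 * 1 << bucket_order eviction/activation events.
 */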

/**
 * workingset_test_recent - tests if the shadow entry is for a folio that was
 * recently evicted. Also fills in @workingset with the value unpacked from
 * shadow.
 * @shadow: the shadow entry to be tested.
 * @file: whether the corresponding folio is from the file lru.
 * @workingset: where the workingset value unpacked from shadow should
 * be stored.
 * @flush: whether to flush cgroup rstat.
 *
 * Return: true if the shadow is for a recently evicted folio; false otherwise.
 */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	unsigned long refault;
	int memcgid;
	struct pglist_data *pgdat;
	unsigned long eviction;

	rcu_read_lock();

	if (lru_gen_enabled()) {
		bool recent = lru_gen_test_recent(shadow, file,
				&eviction_lruvec, &eviction, workingset);

		rcu_read_unlock();
		return recent;
	}

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
	eviction <<= bucket_order;

	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() &&
	    (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) {
		rcu_read_unlock();
		return false;
	}

	rcu_read_unlock();

	/*
	 * Flush stats (and potentially sleep) outside the RCU read section.
	 *
	 * Note that workingset_test_recent() itself might be called in RCU read
	 * section (e.g. in cachestat) - these callers need to skip flushing
	 * stats (via the flush argument).
	 *
	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
	 * still needed here?
	 */
	if (flush)
		mem_cgroup_flush_stats_ratelimited(eviction_memcg);

	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old.  But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again.  However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
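	/*
	 * Wraparound sketch with a hypothetical 8-bit EVICTION_MASK of
	 * 0xff: eviction == 0xfa and a later refault reading of 0x04
	 * give (0x04 - 0xfa) & 0xff == 0x0a, the true distance of 10
	 * events even though the counter wrapped in between.
	 */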
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having free swap space.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}

	mem_cgroup_put(eviction_memcg);
	return refault_distance <= workingset_size;
}

/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	bool workingset;
	long nr;

	if (lru_gen_enabled()) {
		lru_gen_refault(folio, shadow);
		return;
	}

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event. Make sure the folio is
	 * locked to guarantee folio_memcg() stability throughout.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	pgdat = folio_pgdat(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	if (!workingset_test_recent(shadow, file, &workingset, true))
		return;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/*
		 * XXX: Move to folio_add_lru() when it supports new vs
		 * putback
		 */
		lru_note_cost_refault(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = folio_memcg_rcu(folio);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct address_space *mapping;
	struct page *page = virt_to_page(node);

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
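	/*
	 * Hypothetical states, for illustration: a node whose slots
	 * hold five entries, all of them shadow entries, has
	 * node->count == node->nr_values == 5 and belongs on the
	 * shadow LRU; once any slot holds a present page,
	 * count > nr_values and the node must come off the list.
	 */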
	mapping = container_of(node->array, struct address_space, i_pages);
	lockdep_assert_held(&mapping->i_pages.xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add_obj(&shadow_nodes, &node->private_list);
			__inc_node_page_state(page, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del_obj(&shadow_nodes, &node->private_list);
			__dec_node_page_state(page, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
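	/*
	 * Working through those example numbers: XA_CHUNK_SHIFT of 6
	 * makes max_nodes = pages >> 3, one node per eight pages, and
	 * each node occupies roughly PAGE_SIZE / 7 bytes, capping
	 * shadow memory at about pages * PAGE_SIZE / 56, i.e. ~1.8%.
	 */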
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		mem_cgroup_flush_stats_ratelimited(sc->memcg);
		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	/* For page cache we need to hold i_lock */
	if (mapping->host != NULL) {
		if (!spin_trylock(&mapping->host->i_lock)) {
			xa_unlock(&mapping->i_pages);
			spin_unlock_irq(lru_lock);
			ret = LRU_RETRY;
			goto out;
		}
	}

	list_lru_isolate(lru, item);
	__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping->host != NULL) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	struct shrinker *workingset_shadow_shrinker;
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret = -ENOMEM;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);
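
	/*
	 * Illustrative numbers, not from any particular machine: with
	 * the 40 timestamp bits assumed earlier, a 16 GiB box with
	 * 4 KiB pages has totalram_pages() around 1 << 22, so
	 * max_order = 22 and bucket_order stays 0; only systems whose
	 * page count exceeds 1 << timestamp_bits need coarser buckets.
	 */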

	workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						    SHRINKER_MEMCG_AWARE,
						    "mm-shadow");
	if (!workingset_shadow_shrinker)
		goto err;

	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;

	workingset_shadow_shrinker->count_objects = count_shadow_nodes;
	workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
	/* ->count reports only fully expendable nodes */
	workingset_shadow_shrinker->seeks = 0;

	shrinker_register(workingset_shadow_shrinker);
	return 0;
err_list_lru:
	shrinker_free(workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);