xref: /linux/mm/workingset.c (revision 95f28190aa012b18eab14799b905b6db3cf31529)
// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
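 * A short worked example may help (the numbers are hypothetical, not
 * taken from any particular system): suppose NR_inactive = 100 and
 * NR_active = 400.  A page is evicted when the eviction/activation
 * counter reads E = 1000 and refaults when a later reading gives
 * R = 1250.  Its refault distance is R - E = 250; since 250 <= 400,
 * the page could have stayed in cache had it displaced some of the
 * active pages, so it is activated on refault.  Had it come back at
 * R = 1500 instead, the distance of 500 would exceed NR_active and
 * the page would start out on the inactive list like any other fault.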
 *
 *		Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache radix tree
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
			 NODES_SHIFT +	\
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

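/*
 * Rough layout of a packed shadow entry, most significant bits first
 * (exact field widths depend on the kernel configuration):
 *
 *   [ eviction >> bucket_order | memcg ID | node ID | exceptional tag ]
 *
 * pack_shadow() shifts the fields in from the top down and tags the
 * result as an exceptional entry; unpack_shadow() strips them off
 * again from the bottom up.
 */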
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
{
	eviction >>= bucket_order;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp)
{
	unsigned long entry = (unsigned long)shadow;
	int memcgid, nid;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
}
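
/*
 * For instance (hypothetical values): packing memcgid 5, node 0 and an
 * eviction counter of 1027 with bucket_order == 2 stores 1027 >> 2 == 256
 * in the timestamp field; unpack_shadow() then yields memcgid 5,
 * NODE_DATA(0) and an eviction of 256 << 2 == 1024.  The low counter
 * bits are deliberately traded away for range, as described above
 * bucket_order.
 */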

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);
	struct pglist_data *pgdat = page_pgdat(page);
	int memcgid = mem_cgroup_id(memcg);
	unsigned long eviction;
	struct lruvec *lruvec;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(pgdat, memcg);
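	/*
	 * This reading of inactive_age is the "E" snapshot described in
	 * the comment block at the top of this file; it is packed into
	 * the shadow entry and compared against a later reading ("R")
	 * when the page refaults.
	 */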
	eviction = atomic_long_inc_return(&lruvec->inactive_age);
	return pack_shadow(memcgid, pgdat, eviction);
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
	unsigned long refault_distance;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	struct pglist_data *pgdat;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !memcg) {
		rcu_read_unlock();
		return false;
	}
	lruvec = mem_cgroup_lruvec(pgdat, memcg);
	refault = atomic_long_read(&lruvec->inactive_age);
	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
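	/*
	 * refault corresponds to the "R" reading from the design notes
	 * at the top of this file, and active_file to the NR_active
	 * term that the refault distance is checked against below.
	 */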

	/*
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases.
	 *
	 * There is a special case: usually, shadow entries have a
	 * short lifetime and are either refaulted or reclaimed along
	 * with the inode before they get too old.  But it is not
	 * impossible for the inactive_age to lap a shadow entry in
	 * the field, which can then result in a falsely small
	 * refault distance, leading to a false activation should this
	 * old entry actually refault again.  However, earlier kernels
	 * used to deactivate unconditionally with *every* reclaim
	 * invocation for the longest time, so the occasional
	 * inappropriate activation leading to pressure on the active
	 * list is not a problem.
	 */
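	/*
	 * Hypothetical illustration of the wraparound behaviour: with
	 * an 8-bit counter, an eviction at 250 followed by a refault
	 * after the counter has wrapped to 10 still yields
	 * (10 - 250) & 0xff == 16, the true distance; EVICTION_MASK
	 * plays the role of 0xff here.
	 */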
	refault_distance = (refault - eviction) & EVICTION_MASK;

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

	if (refault_distance <= active_file) {
		inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
		rcu_read_unlock();
		return true;
	}
	rcu_read_unlock();
	return false;
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	atomic_long_inc(&lruvec->inactive_age);
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct radix_tree_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by &mapping->tree_lock.
	 */
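	/*
	 * For example, a node holding three shadow entries and no pages
	 * (count == exceptional == 3) belongs on the shadow node LRU;
	 * once a real page is inserted (count == 4, exceptional == 3),
	 * or the last entry is removed (count == 0), it must come off.
	 */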
	if (node->count && node->count == node->exceptional) {
		if (list_empty(&node->private_list))
			list_lru_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_lru_del(&shadow_nodes, &node->private_list);
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long cache;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	local_irq_enable();

	/*
	 * Approximate a reasonable limit for the radix tree nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
	 */
	if (sc->memcg) {
		cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
						     LRU_ALL_FILE);
	} else {
		cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
	}
	max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
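	/*
	 * For example, with RADIX_TREE_MAP_SHIFT == 6 this allows one
	 * node per 8 cache pages; at roughly PAGE_SIZE / 7 bytes per
	 * node, shadow nodes are capped at about 1/56th, i.e. ~1.8%,
	 * of the cache's own memory footprint - the figure quoted above.
	 */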

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = container_of(node->root, struct address_space, page_tree);

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->exceptional))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->exceptional))
		goto out_invalid;
	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
				goto out_invalid;
			if (WARN_ON_ONCE(!node->exceptional))
				goto out_invalid;
			if (WARN_ON_ONCE(!mapping->nrexceptional))
				goto out_invalid;
			node->slots[i] = NULL;
			node->exceptional--;
			node->count--;
			mapping->nrexceptional--;
		}
	}
	if (WARN_ON_ONCE(node->exceptional))
		goto out_invalid;
	inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
	__radix_tree_delete_node(&mapping->page_tree, node,
				 workingset_lookup_update(mapping));

out_invalid:
	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	unsigned long ret;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
	local_irq_enable();
	return ret;
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
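	/*
	 * For instance, on a hypothetical 32-bit configuration where
	 * EVICTION_SHIFT works out to 24, only 8 timestamp bits remain;
	 * with 2GB of RAM (totalram_pages ~= 2^19), max_order is 19 and
	 * bucket_order becomes 11, i.e. evictions are grouped into
	 * buckets of 2048 counter ticks.  On typical 64-bit systems the
	 * timestamp easily covers all of memory and bucket_order stays 0.
	 */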
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
	if (ret)
		goto err;
	ret = register_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	return 0;
err_list_lru:
	list_lru_destroy(&shadow_nodes);
err:
	return ret;
}
module_init(workingset_init);