// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more frequently than the
 * thrashing set, but hopefully are used less:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
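 *
 * A worked example with made-up numbers: suppose a page is evicted
 * when the eviction/activation counter reads E = 1000, and it
 * refaults when a fresh reading gives R = 1300.  Its refault
 * distance is R - E = 300.  If NR_active = 500, then
 * (R - E) <= NR_active holds: the page could have stayed resident
 * had the active list ceded 300 slots, so it is activated on
 * refault.  If NR_active were only 200, the distance would exceed
 * the active list and the page would go back to the inactive list.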
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
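
/*
 * A sketch of the resulting bit layout, under assumed parameters: a
 * 64-bit kernel (BITS_PER_XA_VALUE = 63, so one bit goes to the
 * xarray value tag), MEM_CGROUP_ID_SHIFT = 16 and a hypothetical
 * NODES_SHIFT of 6.  Then EVICTION_SHIFT = 1 + 1 + 6 + 16 = 24 and
 * the eviction timestamp keeps 64 - 24 = 40 bits:
 *
 *   62                      23 22        7 6    1 0
 *  +--------------------------+-----------+------+-+
 *  |    eviction timestamp    |  memcg id | node |W|
 *  +--------------------------+-----------+------+-+
 *
 * W is the PageWorkingset bit.  pack_shadow() below assembles this
 * value and unpack_shadow() takes it apart again.
 */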

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}
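
/*
 * Illustrative round trip with made-up values: with bucket_order == 2,
 * pack_shadow() first shaves off the two low timestamp bits, so an
 * eviction value of 1023 is stored as 1023 >> 2 == 255; unpack_shadow()
 * then reconstructs it as 255 << 2 == 1020.  The granularity lost to
 * bucketing is thus at most (1 << bucket_order) - 1 pages of distance.
 */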

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @page: the page being evicted
 * @target_memcg: the cgroup that is causing the reclaim
 *
 * Return: a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page's memory cgroup pointer */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	bool file = page_is_file_lru(page);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old.  But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again.  However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
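	/*
	 * Made-up wraparound illustration (ignoring bucketing): with
	 * EVICTION_MASK covering, say, 40 bits, an entry stored with
	 * low timestamp bits EVICTION_MASK - 10 that refaults after
	 * the counter's low bits wrapped around to 20 yields
	 * (20 - (EVICTION_MASK - 10)) & EVICTION_MASK == 31 - the
	 * modular subtraction still recovers the true distance of 31.
	 */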

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}
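	/*
	 * Restating the sums above: a refaulting file page competes
	 * with the active file set plus, if swap is available, all of
	 * anon; a refaulting anon page competes with the whole file
	 * set plus, if swap is available, the active anon set.  The
	 * page's own inactive list is never counted - the page
	 * already failed to survive there.
	 */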
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		lru_note_cost_page(page);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_page_lruvec(page);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled());  /* For __inc_lruvec_kmem_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
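	/*
	 * Working through the numbers above, assuming 4K pages and
	 * the usual XA_CHUNK_SHIFT of 6 (64 slots): a page holds ~7
	 * xa_nodes of ~585 bytes each.  At the 1/8th density cutoff,
	 * max_nodes = pages >> 3, whose combined size is roughly
	 * pages * 585 / 8 ~= pages * 73 bytes, i.e. ~1.8% of the
	 * pages' 4096 bytes each.
	 */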

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);
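	/*
	 * Example with assumed numbers: a 64-bit machine with 16GB of
	 * 4K pages has totalram_pages() == 2^22, so max_order == 22.
	 * With a hypothetical EVICTION_SHIFT of 24 there are 40
	 * timestamp bits - comfortably more than 22 - and
	 * bucket_order stays 0.  Only when the identifier bits push
	 * timestamp_bits below max_order are evictions grouped into
	 * 2^bucket_order-page buckets.
	 */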

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);