xref: /linux/mm/zswap.c (revision e3c33bc767b5512dbfec643a02abf58ce608f3b2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * zswap.c - zswap driver file
4  *
5  * zswap is a cache that takes pages that are in the process
6  * of being swapped out and attempts to compress and store them in a
7  * RAM-based memory pool.  This can result in a significant I/O reduction on
8  * the swap device and, in the case where decompressing from RAM is faster
9  * than reading from the swap device, can also improve workload performance.
10  *
11  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
12 */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/highmem.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/swap.h>
24 #include <linux/crypto.h>
25 #include <linux/scatterlist.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mempool.h>
28 #include <crypto/acompress.h>
29 #include <crypto/scatterwalk.h>
30 #include <linux/zswap.h>
31 #include <linux/mm_types.h>
32 #include <linux/page-flags.h>
33 #include <linux/swapops.h>
34 #include <linux/writeback.h>
35 #include <linux/pagemap.h>
36 #include <linux/workqueue.h>
37 #include <linux/list_lru.h>
38 #include <linux/zsmalloc.h>
39 
40 #include "swap.h"
41 #include "internal.h"
42 
43 /*********************************
44 * statistics
45 **********************************/
46 /* The number of pages currently stored in zswap */
47 atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);
48 /* The number of incompressible pages currently stored in zswap */
49 static atomic_long_t zswap_stored_incompressible_pages = ATOMIC_LONG_INIT(0);
50 
51 /*
52  * The statistics below are not protected from concurrent access for
53  * performance reasons, so they may not be 100% accurate.  However,
54  * they do provide useful information on roughly how many times a
55  * certain event is occurring.
56 */
57 
58 /* Pool limit was hit (see zswap_max_pool_percent) */
59 static u64 zswap_pool_limit_hit;
60 /* Pages written back when pool limit was reached */
61 static u64 zswap_written_back_pages;
62 /* Store failed due to a reclaim failure after pool limit was reached */
63 static u64 zswap_reject_reclaim_fail;
64 /* Store failed due to compression algorithm failure */
65 static u64 zswap_reject_compress_fail;
66 /* Compressed page was too big for the allocator to (optimally) store */
67 static u64 zswap_reject_compress_poor;
68 /* Load or writeback failed due to decompression failure */
69 static u64 zswap_decompress_fail;
70 /* Store failed because underlying allocator could not get memory */
71 static u64 zswap_reject_alloc_fail;
72 /* Store failed because the entry metadata could not be allocated (rare) */
73 static u64 zswap_reject_kmemcache_fail;
74 
75 /* Shrinker work queue */
76 static struct workqueue_struct *shrink_wq;
77 /* Pool limit was hit, we need to calm down */
78 static bool zswap_pool_reached_full;
79 
80 /*********************************
81 * tunables
82 **********************************/
83 
84 #define ZSWAP_PARAM_UNSET ""
85 
86 static int zswap_setup(void);
87 
88 /* Enable/disable zswap */
89 static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
90 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
91 static int zswap_enabled_param_set(const char *,
92 				   const struct kernel_param *);
93 static const struct kernel_param_ops zswap_enabled_param_ops = {
94 	.set =		zswap_enabled_param_set,
95 	.get =		param_get_bool,
96 };
97 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
98 
99 /* Crypto compressor to use */
100 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
101 static int zswap_compressor_param_set(const char *,
102 				      const struct kernel_param *);
103 static const struct kernel_param_ops zswap_compressor_param_ops = {
104 	.set =		zswap_compressor_param_set,
105 	.get =		param_get_charp,
106 	.free =		param_free_charp,
107 };
108 module_param_cb(compressor, &zswap_compressor_param_ops,
109 		&zswap_compressor, 0644);
110 
111 /* The maximum percentage of memory that the compressed pool can occupy */
112 static unsigned int zswap_max_pool_percent = 20;
113 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
114 
115 /* The threshold for accepting new pages after the max_pool_percent was hit */
116 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
117 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
118 		   uint, 0644);
119 
120 /* Enable/disable memory pressure-based shrinker. */
121 static bool zswap_shrinker_enabled = IS_ENABLED(
122 		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
123 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
124 
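/*
 * Illustrative usage: because these are module parameters with 0644
 * permissions, they can normally be inspected and tuned at runtime
 * through sysfs (exact availability depends on the kernel config), e.g.:
 *
 *	cat  /sys/module/zswap/parameters/enabled
 *	echo 25   > /sys/module/zswap/parameters/max_pool_percent
 *	echo zstd > /sys/module/zswap/parameters/compressor
 */
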
125 bool zswap_is_enabled(void)
126 {
127 	return zswap_enabled;
128 }
129 
130 bool zswap_never_enabled(void)
131 {
132 	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
133 }
134 
135 /*********************************
136 * data structures
137 **********************************/
138 
139 struct crypto_acomp_ctx {
140 	struct crypto_acomp *acomp;
141 	struct acomp_req *req;
142 	struct crypto_wait wait;
143 	u8 *buffer;
144 	struct mutex mutex;
145 };
146 
147 /*
148  * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
149  * The only case where lru_lock is not acquired while holding tree.lock is
150  * when a zswap_entry is taken off the lru for writeback, in that case it
151  * needs to be verified that it's still valid in the tree.
152  */
153 struct zswap_pool {
154 	struct zs_pool *zs_pool;
155 	struct crypto_acomp_ctx __percpu *acomp_ctx;
156 	struct percpu_ref ref;
157 	struct list_head list;
158 	struct work_struct release_work;
159 	struct hlist_node node;
160 	char tfm_name[CRYPTO_MAX_ALG_NAME];
161 };
162 
163 /* Global LRU lists shared by all zswap pools. */
164 static struct list_lru zswap_list_lru;
165 
166 /* The lock protects zswap_next_shrink updates. */
167 static DEFINE_SPINLOCK(zswap_shrink_lock);
168 static struct mem_cgroup *zswap_next_shrink;
169 static struct work_struct zswap_shrink_work;
170 static struct shrinker *zswap_shrinker;
171 
172 /*
173  * struct zswap_entry
174  *
175  * This structure contains the metadata for tracking a single compressed
176  * page within zswap.
177  *
178  * swpentry - associated swap entry, the offset indexes into the xarray
179  * length - the length in bytes of the compressed page data.  Needed during
180  *          decompression.
181  * referenced - true if the entry recently entered the zswap pool. Unset by the
182  *              writeback logic. The entry is only reclaimed by the writeback
183  *              logic if referenced is unset. See comments in the shrinker
184  *              section for context.
185  * pool - the zswap_pool the entry's data is in
186  * handle - zsmalloc allocation handle that stores the compressed page data
187  * objcg - the obj_cgroup that the compressed memory is charged to
188  * lru - handle to the pool's lru used to evict pages.
189  */
190 struct zswap_entry {
191 	swp_entry_t swpentry;
192 	unsigned int length;
193 	bool referenced;
194 	struct zswap_pool *pool;
195 	unsigned long handle;
196 	struct obj_cgroup *objcg;
197 	struct list_head lru;
198 };
199 
200 static struct xarray *zswap_trees[MAX_SWAPFILES];
201 static unsigned int nr_zswap_trees[MAX_SWAPFILES];
202 
203 /* RCU-protected iteration */
204 static LIST_HEAD(zswap_pools);
205 /* protects zswap_pools list modification */
206 static DEFINE_SPINLOCK(zswap_pools_lock);
207 /* pool counter to provide unique names to zsmalloc */
208 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
209 
210 enum zswap_init_type {
211 	ZSWAP_UNINIT,
212 	ZSWAP_INIT_SUCCEED,
213 	ZSWAP_INIT_FAILED
214 };
215 
216 static enum zswap_init_type zswap_init_state;
217 
218 /* used to ensure the integrity of initialization */
219 static DEFINE_MUTEX(zswap_init_lock);
220 
221 /* init completed, but couldn't create the initial pool */
222 static bool zswap_has_pool;
223 
224 /*********************************
225 * helpers and fwd declarations
226 **********************************/
227 
228 /* One swap address space for each 64M swap space */
229 #define ZSWAP_ADDRESS_SPACE_SHIFT 14
230 #define ZSWAP_ADDRESS_SPACE_PAGES (1 << ZSWAP_ADDRESS_SPACE_SHIFT)
231 static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
232 {
233 	return &zswap_trees[swp_type(swp)][swp_offset(swp)
234 		>> ZSWAP_ADDRESS_SPACE_SHIFT];
235 }
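
/*
 * Worked example (illustrative): with ZSWAP_ADDRESS_SPACE_SHIFT == 14,
 * each xarray covers 1 << 14 == 16384 page-sized slots, i.e. 64 MiB of
 * swap space.  A swap entry with offset 100000 is therefore looked up
 * in zswap_trees[type][100000 >> 14], i.e. tree index 6.
 */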
236 
237 #define zswap_pool_debug(msg, p)			\
238 	pr_debug("%s pool %s\n", msg, (p)->tfm_name)
239 
240 /*********************************
241 * pool functions
242 **********************************/
243 static void __zswap_pool_empty(struct percpu_ref *ref);
244 
245 static struct zswap_pool *zswap_pool_create(char *compressor)
246 {
247 	struct zswap_pool *pool;
248 	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
249 	int ret, cpu;
250 
251 	if (!zswap_has_pool && !strcmp(compressor, ZSWAP_PARAM_UNSET))
252 		return NULL;
253 
254 	pool = kzalloc_obj(*pool);
255 	if (!pool)
256 		return NULL;
257 
258 	/* unique name for each pool specifically required by zsmalloc */
259 	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
260 	pool->zs_pool = zs_create_pool(name);
261 	if (!pool->zs_pool)
262 		goto error;
263 
264 	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
265 
266 	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
267 	if (!pool->acomp_ctx) {
268 		pr_err("percpu alloc failed\n");
269 		goto error;
270 	}
271 
272 	for_each_possible_cpu(cpu)
273 		mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);
274 
275 	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
276 				       &pool->node);
277 	if (ret)
278 		goto error;
279 
280 	/* being the current pool takes 1 ref; this func expects the
281 	 * caller to always add the new pool as the current pool
282 	 */
283 	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
284 			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
285 	if (ret)
286 		goto ref_fail;
287 	INIT_LIST_HEAD(&pool->list);
288 
289 	zswap_pool_debug("created", pool);
290 
291 	return pool;
292 
293 ref_fail:
294 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
295 error:
296 	if (pool->acomp_ctx)
297 		free_percpu(pool->acomp_ctx);
298 	if (pool->zs_pool)
299 		zs_destroy_pool(pool->zs_pool);
300 	kfree(pool);
301 	return NULL;
302 }
303 
304 static struct zswap_pool *__zswap_pool_create_fallback(void)
305 {
306 	if (!crypto_has_acomp(zswap_compressor, 0, 0) &&
307 	    strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
308 		pr_err("compressor %s not available, using default %s\n",
309 		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
310 		param_free_charp(&zswap_compressor);
311 		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
312 	}
313 
314 	/* Default compressor should be available. Kconfig bug? */
315 	if (WARN_ON_ONCE(!crypto_has_acomp(zswap_compressor, 0, 0))) {
316 		zswap_compressor = ZSWAP_PARAM_UNSET;
317 		return NULL;
318 	}
319 
320 	return zswap_pool_create(zswap_compressor);
321 }
322 
323 static void zswap_pool_destroy(struct zswap_pool *pool)
324 {
325 	zswap_pool_debug("destroying", pool);
326 
327 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
328 	free_percpu(pool->acomp_ctx);
329 
330 	zs_destroy_pool(pool->zs_pool);
331 	kfree(pool);
332 }
333 
334 static void __zswap_pool_release(struct work_struct *work)
335 {
336 	struct zswap_pool *pool = container_of(work, typeof(*pool),
337 						release_work);
338 
339 	synchronize_rcu();
340 
341 	/* nobody should have been able to get a ref... */
342 	WARN_ON(!percpu_ref_is_zero(&pool->ref));
343 	percpu_ref_exit(&pool->ref);
344 
345 	/* pool is now off zswap_pools list and has no references. */
346 	zswap_pool_destroy(pool);
347 }
348 
349 static struct zswap_pool *zswap_pool_current(void);
350 
351 static void __zswap_pool_empty(struct percpu_ref *ref)
352 {
353 	struct zswap_pool *pool;
354 
355 	pool = container_of(ref, typeof(*pool), ref);
356 
357 	spin_lock_bh(&zswap_pools_lock);
358 
359 	WARN_ON(pool == zswap_pool_current());
360 
361 	list_del_rcu(&pool->list);
362 
363 	INIT_WORK(&pool->release_work, __zswap_pool_release);
364 	schedule_work(&pool->release_work);
365 
366 	spin_unlock_bh(&zswap_pools_lock);
367 }
368 
369 static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
370 {
371 	if (!pool)
372 		return 0;
373 
374 	return percpu_ref_tryget(&pool->ref);
375 }
376 
377 /* The caller must already have a reference. */
378 static void zswap_pool_get(struct zswap_pool *pool)
379 {
380 	percpu_ref_get(&pool->ref);
381 }
382 
383 static void zswap_pool_put(struct zswap_pool *pool)
384 {
385 	percpu_ref_put(&pool->ref);
386 }
387 
388 static struct zswap_pool *__zswap_pool_current(void)
389 {
390 	struct zswap_pool *pool;
391 
392 	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
393 	WARN_ONCE(!pool && zswap_has_pool,
394 		  "%s: no page storage pool!\n", __func__);
395 
396 	return pool;
397 }
398 
399 static struct zswap_pool *zswap_pool_current(void)
400 {
401 	assert_spin_locked(&zswap_pools_lock);
402 
403 	return __zswap_pool_current();
404 }
405 
406 static struct zswap_pool *zswap_pool_current_get(void)
407 {
408 	struct zswap_pool *pool;
409 
410 	rcu_read_lock();
411 
412 	pool = __zswap_pool_current();
413 	if (!zswap_pool_tryget(pool))
414 		pool = NULL;
415 
416 	rcu_read_unlock();
417 
418 	return pool;
419 }
420 
421 /* type and compressor must be null-terminated */
422 static struct zswap_pool *zswap_pool_find_get(char *compressor)
423 {
424 	struct zswap_pool *pool;
425 
426 	assert_spin_locked(&zswap_pools_lock);
427 
428 	list_for_each_entry_rcu(pool, &zswap_pools, list) {
429 		if (strcmp(pool->tfm_name, compressor))
430 			continue;
431 		/* if we can't get it, it's about to be destroyed */
432 		if (!zswap_pool_tryget(pool))
433 			continue;
434 		return pool;
435 	}
436 
437 	return NULL;
438 }
439 
440 static unsigned long zswap_max_pages(void)
441 {
442 	return totalram_pages() * zswap_max_pool_percent / 100;
443 }
444 
445 static unsigned long zswap_accept_thr_pages(void)
446 {
447 	return zswap_max_pages() * zswap_accept_thr_percent / 100;
448 }
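
/*
 * Worked example (illustrative): on a machine with 16 GiB of RAM
 * (4194304 4K pages), the defaults above give a pool limit of
 * 4194304 * 20 / 100 == 838860 pages (~3.2 GiB) and an acceptance
 * threshold of 838860 * 90 / 100 == 754974 pages, below which stores
 * are accepted again after the limit has been hit.
 */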
449 
450 unsigned long zswap_total_pages(void)
451 {
452 	struct zswap_pool *pool;
453 	unsigned long total = 0;
454 
455 	rcu_read_lock();
456 	list_for_each_entry_rcu(pool, &zswap_pools, list)
457 		total += zs_get_total_pages(pool->zs_pool);
458 	rcu_read_unlock();
459 
460 	return total;
461 }
462 
463 static bool zswap_check_limits(void)
464 {
465 	unsigned long cur_pages = zswap_total_pages();
466 	unsigned long max_pages = zswap_max_pages();
467 
468 	if (cur_pages >= max_pages) {
469 		zswap_pool_limit_hit++;
470 		zswap_pool_reached_full = true;
471 	} else if (zswap_pool_reached_full &&
472 		   cur_pages <= zswap_accept_thr_pages()) {
473 			zswap_pool_reached_full = false;
474 	}
475 	return zswap_pool_reached_full;
476 }
477 
478 /*********************************
479 * param callbacks
480 **********************************/
481 
482 static int zswap_compressor_param_set(const char *val, const struct kernel_param *kp)
483 {
484 	struct zswap_pool *pool, *put_pool = NULL;
485 	char *s = strstrip((char *)val);
486 	bool create_pool = false;
487 	int ret = 0;
488 
489 	mutex_lock(&zswap_init_lock);
490 	switch (zswap_init_state) {
491 	case ZSWAP_UNINIT:
492 		/* Handled in zswap_setup() */
493 		ret = param_set_charp(s, kp);
494 		break;
495 	case ZSWAP_INIT_SUCCEED:
496 		if (!zswap_has_pool || strcmp(s, *(char **)kp->arg))
497 			create_pool = true;
498 		break;
499 	case ZSWAP_INIT_FAILED:
500 		pr_err("can't set param, initialization failed\n");
501 		ret = -ENODEV;
502 	}
503 	mutex_unlock(&zswap_init_lock);
504 
505 	if (!create_pool)
506 		return ret;
507 
508 	if (!crypto_has_acomp(s, 0, 0)) {
509 		pr_err("compressor %s not available\n", s);
510 		return -ENOENT;
511 	}
512 
513 	spin_lock_bh(&zswap_pools_lock);
514 
515 	pool = zswap_pool_find_get(s);
516 	if (pool) {
517 		zswap_pool_debug("using existing", pool);
518 		WARN_ON(pool == zswap_pool_current());
519 		list_del_rcu(&pool->list);
520 	}
521 
522 	spin_unlock_bh(&zswap_pools_lock);
523 
524 	if (!pool)
525 		pool = zswap_pool_create(s);
526 	else {
527 		/*
528 		 * Restore the initial ref dropped by percpu_ref_kill()
529 		 * when the pool was decommissioned and switch it again
530 		 * to percpu mode.
531 		 */
532 		percpu_ref_resurrect(&pool->ref);
533 
534 		/* Drop the ref from zswap_pool_find_get(). */
535 		zswap_pool_put(pool);
536 	}
537 
538 	if (pool)
539 		ret = param_set_charp(s, kp);
540 	else
541 		ret = -EINVAL;
542 
543 	spin_lock_bh(&zswap_pools_lock);
544 
545 	if (!ret) {
546 		put_pool = zswap_pool_current();
547 		list_add_rcu(&pool->list, &zswap_pools);
548 		zswap_has_pool = true;
549 	} else if (pool) {
550 		/*
551 		 * Add the possibly pre-existing pool to the end of the pools
552 		 * list; if it's new (and empty) then it'll be removed and
553 		 * destroyed by the put after we drop the lock
554 		 */
555 		list_add_tail_rcu(&pool->list, &zswap_pools);
556 		put_pool = pool;
557 	}
558 
559 	spin_unlock_bh(&zswap_pools_lock);
560 
561 	/*
562 	 * Drop the ref from either the old current pool,
563 	 * or the new pool we failed to add
564 	 */
565 	if (put_pool)
566 		percpu_ref_kill(&put_pool->ref);
567 
568 	return ret;
569 }
570 
571 static int zswap_enabled_param_set(const char *val,
572 				   const struct kernel_param *kp)
573 {
574 	int ret = -ENODEV;
575 
576 	/* if this is load-time (pre-init) param setting, only set param. */
577 	if (system_state != SYSTEM_RUNNING)
578 		return param_set_bool(val, kp);
579 
580 	mutex_lock(&zswap_init_lock);
581 	switch (zswap_init_state) {
582 	case ZSWAP_UNINIT:
583 		if (zswap_setup())
584 			break;
585 		fallthrough;
586 	case ZSWAP_INIT_SUCCEED:
587 		if (!zswap_has_pool)
588 			pr_err("can't enable, no pool configured\n");
589 		else
590 			ret = param_set_bool(val, kp);
591 		break;
592 	case ZSWAP_INIT_FAILED:
593 		pr_err("can't enable, initialization failed\n");
594 	}
595 	mutex_unlock(&zswap_init_lock);
596 
597 	return ret;
598 }
599 
600 /*********************************
601 * lru functions
602 **********************************/
603 
604 /* should be called under RCU */
605 #ifdef CONFIG_MEMCG
606 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
607 {
608 	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
609 }
610 #else
611 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
612 {
613 	return NULL;
614 }
615 #endif
616 
617 static inline int entry_to_nid(struct zswap_entry *entry)
618 {
619 	return page_to_nid(virt_to_page(entry));
620 }
621 
622 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
623 {
624 	int nid = entry_to_nid(entry);
625 	struct mem_cgroup *memcg;
626 
627 	/*
628 	 * Note that it is safe to use rcu_read_lock() here, even in the face of
629 	 * concurrent memcg offlining:
630 	 *
631 	 * 1. list_lru_add() is called before list_lru_one is dead. The
632 	 *    new entry will be reparented to memcg's parent's list_lru.
633 	 * 2. list_lru_add() is called after list_lru_one is dead. The
634 	 *    new entry will be added directly to memcg's parent's list_lru.
635 	 *
636 	 * Similar reasoning holds for list_lru_del().
637 	 */
638 	rcu_read_lock();
639 	memcg = mem_cgroup_from_entry(entry);
640 	/* will always succeed */
641 	list_lru_add(list_lru, &entry->lru, nid, memcg);
642 	rcu_read_unlock();
643 }
644 
645 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
646 {
647 	int nid = entry_to_nid(entry);
648 	struct mem_cgroup *memcg;
649 
650 	rcu_read_lock();
651 	memcg = mem_cgroup_from_entry(entry);
652 	/* will always succeed */
653 	list_lru_del(list_lru, &entry->lru, nid, memcg);
654 	rcu_read_unlock();
655 }
656 
657 void zswap_lruvec_state_init(struct lruvec *lruvec)
658 {
659 	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
660 }
661 
662 void zswap_folio_swapin(struct folio *folio)
663 {
664 	struct lruvec *lruvec;
665 
666 	if (folio) {
667 		lruvec = folio_lruvec(folio);
668 		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
669 	}
670 }
671 
672 /*
673  * This function should be called when a memcg is being offlined.
674  *
675  * Since the global shrinker shrink_worker() may hold a reference
676  * of the memcg, we must check and release the reference in
677  * zswap_next_shrink.
678  *
679  * shrink_worker() must handle the case where this function releases
680  * the reference of memcg being shrunk.
681  */
682 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
683 {
684 	/* lock out zswap shrinker walking memcg tree */
685 	spin_lock(&zswap_shrink_lock);
686 	if (zswap_next_shrink == memcg) {
687 		do {
688 			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
689 		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
690 	}
691 	spin_unlock(&zswap_shrink_lock);
692 }
693 
694 /*********************************
695 * zswap entry functions
696 **********************************/
697 static struct kmem_cache *zswap_entry_cache;
698 
699 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
700 {
701 	struct zswap_entry *entry;
702 	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
703 	if (!entry)
704 		return NULL;
705 	return entry;
706 }
707 
708 static void zswap_entry_cache_free(struct zswap_entry *entry)
709 {
710 	kmem_cache_free(zswap_entry_cache, entry);
711 }
712 
713 /*
714  * Carries out the common pattern of freeing an entry's zsmalloc allocation,
715  * freeing the entry itself, and decrementing the number of stored pages.
716  */
717 static void zswap_entry_free(struct zswap_entry *entry)
718 {
719 	zswap_lru_del(&zswap_list_lru, entry);
720 	zs_free(entry->pool->zs_pool, entry->handle);
721 	zswap_pool_put(entry->pool);
722 	if (entry->objcg) {
723 		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
724 		obj_cgroup_put(entry->objcg);
725 	}
726 	if (entry->length == PAGE_SIZE)
727 		atomic_long_dec(&zswap_stored_incompressible_pages);
728 	zswap_entry_cache_free(entry);
729 	atomic_long_dec(&zswap_stored_pages);
730 }
731 
732 /*********************************
733 * compressed storage functions
734 **********************************/
735 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
736 {
737 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
738 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
739 	struct crypto_acomp *acomp = NULL;
740 	struct acomp_req *req = NULL;
741 	u8 *buffer = NULL;
742 	int ret;
743 
744 	buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
745 	if (!buffer) {
746 		ret = -ENOMEM;
747 		goto fail;
748 	}
749 
750 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
751 	if (IS_ERR(acomp)) {
752 		pr_err("could not alloc crypto acomp %s : %pe\n",
753 				pool->tfm_name, acomp);
754 		ret = PTR_ERR(acomp);
755 		goto fail;
756 	}
757 
758 	req = acomp_request_alloc(acomp);
759 	if (!req) {
760 		pr_err("could not alloc crypto acomp_request %s\n",
761 		       pool->tfm_name);
762 		ret = -ENOMEM;
763 		goto fail;
764 	}
765 
766 	/*
767 	 * Only hold the mutex after completing allocations, otherwise we may
768 	 * recurse into zswap through reclaim and attempt to hold the mutex
769 	 * again resulting in a deadlock.
770 	 */
771 	mutex_lock(&acomp_ctx->mutex);
772 	crypto_init_wait(&acomp_ctx->wait);
773 
774 	/*
775 	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
776 	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
777 	 * won't be called, crypto_wait_req() will return without blocking.
778 	 */
779 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
780 				   crypto_req_done, &acomp_ctx->wait);
781 
782 	acomp_ctx->buffer = buffer;
783 	acomp_ctx->acomp = acomp;
784 	acomp_ctx->req = req;
785 	mutex_unlock(&acomp_ctx->mutex);
786 	return 0;
787 
788 fail:
789 	if (!IS_ERR_OR_NULL(acomp))
790 		crypto_free_acomp(acomp);
791 	kfree(buffer);
792 	return ret;
793 }
794 
795 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
796 {
797 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
798 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
799 	struct acomp_req *req;
800 	struct crypto_acomp *acomp;
801 	u8 *buffer;
802 
803 	if (IS_ERR_OR_NULL(acomp_ctx))
804 		return 0;
805 
806 	mutex_lock(&acomp_ctx->mutex);
807 	req = acomp_ctx->req;
808 	acomp = acomp_ctx->acomp;
809 	buffer = acomp_ctx->buffer;
810 	acomp_ctx->req = NULL;
811 	acomp_ctx->acomp = NULL;
812 	acomp_ctx->buffer = NULL;
813 	mutex_unlock(&acomp_ctx->mutex);
814 
815 	/*
816 	 * Do the actual freeing after releasing the mutex to avoid subtle
817 	 * locking dependencies causing deadlocks.
818 	 */
819 	if (!IS_ERR_OR_NULL(req))
820 		acomp_request_free(req);
821 	if (!IS_ERR_OR_NULL(acomp))
822 		crypto_free_acomp(acomp);
823 	kfree(buffer);
824 
825 	return 0;
826 }
827 
828 static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
829 {
830 	struct crypto_acomp_ctx *acomp_ctx;
831 
832 	for (;;) {
833 		acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
834 		mutex_lock(&acomp_ctx->mutex);
835 		if (likely(acomp_ctx->req))
836 			return acomp_ctx;
837 		/*
838 		 * It is possible that we were migrated to a different CPU after
839 		 * getting the per-CPU ctx but before the mutex was acquired. If
840 		 * the old CPU got offlined, zswap_cpu_comp_dead() could have
841 		 * already freed ctx->req (among other things) and set it to
842 		 * NULL. Just try again on the new CPU that we ended up on.
843 		 */
844 		mutex_unlock(&acomp_ctx->mutex);
845 	}
846 }
847 
848 static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
849 {
850 	mutex_unlock(&acomp_ctx->mutex);
851 }
852 
853 static bool zswap_compress(struct page *page, struct zswap_entry *entry,
854 			   struct zswap_pool *pool)
855 {
856 	struct crypto_acomp_ctx *acomp_ctx;
857 	struct scatterlist input, output;
858 	int comp_ret = 0, alloc_ret = 0;
859 	unsigned int dlen = PAGE_SIZE;
860 	unsigned long handle;
861 	gfp_t gfp;
862 	u8 *dst;
863 	bool mapped = false;
864 
865 	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
866 	dst = acomp_ctx->buffer;
867 	sg_init_table(&input, 1);
868 	sg_set_page(&input, page, PAGE_SIZE, 0);
869 
870 	sg_init_one(&output, dst, PAGE_SIZE);
871 	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
872 
873 	/*
874 	 * It may look a little silly that we send an asynchronous request and
875 	 * then wait for its completion synchronously. This makes the process
876 	 * synchronous in practice.
877 	 * Theoretically, acomp lets users submit multiple requests to one
878 	 * acomp instance and have them completed simultaneously. But in this
879 	 * case zswap stores and loads page by page, and there is no existing
880 	 * way to send the second page before the first page is done within
881 	 * one thread doing zswap.
882 	 * However, different threads running on different CPUs have different
883 	 * acomp instances, so multiple threads can do (de)compression in parallel.
884 	 */
885 	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
886 	dlen = acomp_ctx->req->dlen;
887 
888 	/*
889 	 * If a page cannot be compressed into a size smaller than PAGE_SIZE,
890 	 * save the content as is without a compression, to keep the LRU order
891 	 * of writebacks.  If writeback is disabled, reject the page since it
892 	 * only adds metadata overhead.  swap_writeout() will put the page back
893  * to the active LRU list in that case.
894 	 */
895 	if (comp_ret || !dlen || dlen >= PAGE_SIZE) {
896 		if (!mem_cgroup_zswap_writeback_enabled(
897 					folio_memcg(page_folio(page)))) {
898 			comp_ret = comp_ret ? comp_ret : -EINVAL;
899 			goto unlock;
900 		}
901 		comp_ret = 0;
902 		dlen = PAGE_SIZE;
903 		dst = kmap_local_page(page);
904 		mapped = true;
905 	}
906 
907 	gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
908 	handle = zs_malloc(pool->zs_pool, dlen, gfp, page_to_nid(page));
909 	if (IS_ERR_VALUE(handle)) {
910 		alloc_ret = PTR_ERR((void *)handle);
911 		goto unlock;
912 	}
913 
914 	zs_obj_write(pool->zs_pool, handle, dst, dlen);
915 	entry->handle = handle;
916 	entry->length = dlen;
917 
918 unlock:
919 	if (mapped)
920 		kunmap_local(dst);
921 	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
922 		zswap_reject_compress_poor++;
923 	else if (comp_ret)
924 		zswap_reject_compress_fail++;
925 	else if (alloc_ret)
926 		zswap_reject_alloc_fail++;
927 
928 	acomp_ctx_put_unlock(acomp_ctx);
929 	return comp_ret == 0 && alloc_ret == 0;
930 }
931 
932 static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
933 {
934 	struct zswap_pool *pool = entry->pool;
935 	struct scatterlist input[2]; /* zsmalloc returns an SG list 1-2 entries */
936 	struct scatterlist output;
937 	struct crypto_acomp_ctx *acomp_ctx;
938 	int ret = 0, dlen;
939 
940 	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
941 	zs_obj_read_sg_begin(pool->zs_pool, entry->handle, input, entry->length);
942 
943 	/* zswap entries of length PAGE_SIZE are not compressed. */
944 	if (entry->length == PAGE_SIZE) {
945 		void *dst;
946 
947 		WARN_ON_ONCE(input->length != PAGE_SIZE);
948 
949 		dst = kmap_local_folio(folio, 0);
950 		memcpy_from_sglist(dst, input, 0, PAGE_SIZE);
951 		dlen = PAGE_SIZE;
952 		kunmap_local(dst);
953 		flush_dcache_folio(folio);
954 	} else {
955 		sg_init_table(&output, 1);
956 		sg_set_folio(&output, folio, PAGE_SIZE, 0);
957 		acomp_request_set_params(acomp_ctx->req, input, &output,
958 					 entry->length, PAGE_SIZE);
959 		ret = crypto_acomp_decompress(acomp_ctx->req);
960 		ret = crypto_wait_req(ret, &acomp_ctx->wait);
961 		dlen = acomp_ctx->req->dlen;
962 	}
963 
964 	zs_obj_read_sg_end(pool->zs_pool, entry->handle);
965 	acomp_ctx_put_unlock(acomp_ctx);
966 
967 	if (!ret && dlen == PAGE_SIZE)
968 		return true;
969 
970 	zswap_decompress_fail++;
971 	pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n",
972 						swp_type(entry->swpentry),
973 						swp_offset(entry->swpentry),
974 						entry->pool->tfm_name,
975 						entry->length, dlen);
976 	return false;
977 }
978 
979 /*********************************
980 * writeback code
981 **********************************/
982 /*
983  * Attempts to free an entry by adding a folio to the swap cache,
984  * decompressing the entry data into the folio, and issuing a
985  * bio write to write the folio back to the swap device.
986  *
987  * This can be thought of as a "resumed writeback" of the folio
988  * to the swap device.  We are basically resuming the same swap
989  * writeback path that was intercepted with the zswap_store()
990  * in the first place.  After the folio has been decompressed into
991  * the swap cache, the compressed version stored by zswap can be
992  * freed.
993  */
994 static int zswap_writeback_entry(struct zswap_entry *entry,
995 				 swp_entry_t swpentry)
996 {
997 	struct xarray *tree;
998 	pgoff_t offset = swp_offset(swpentry);
999 	struct folio *folio;
1000 	struct mempolicy *mpol;
1001 	bool folio_was_allocated;
1002 	struct swap_info_struct *si;
1003 	int ret = 0;
1004 
1005 	/* try to allocate swap cache folio */
1006 	si = get_swap_device(swpentry);
1007 	if (!si)
1008 		return -EEXIST;
1009 
1010 	mpol = get_task_policy(current);
1011 	folio = swap_cache_alloc_folio(swpentry, GFP_KERNEL, mpol,
1012 				       NO_INTERLEAVE_INDEX, &folio_was_allocated);
1013 	put_swap_device(si);
1014 	if (!folio)
1015 		return -ENOMEM;
1016 
1017 	/*
1018 	 * Found an existing folio, we raced with swapin or concurrent
1019 	 * shrinker. We generally writeback cold folios from zswap, and
1020 	 * swapin means the folio just became hot, so skip this folio.
1021 	 * For unlikely concurrent shrinker case, it will be unlinked
1022 	 * and freed when invalidated by the concurrent shrinker anyway.
1023 	 */
1024 	if (!folio_was_allocated) {
1025 		ret = -EEXIST;
1026 		goto out;
1027 	}
1028 
1029 	/*
1030 	 * folio is locked, and the swapcache is now secured against
1031 	 * concurrent swapping to and from the slot, and concurrent
1032 	 * swapoff so we can safely dereference the zswap tree here.
1033 	 * Verify that the swap entry hasn't been invalidated and recycled
1034 	 * behind our backs, to avoid overwriting a new swap folio with
1035 	 * old compressed data. Only when this is successful can the entry
1036 	 * be dereferenced.
1037 	 */
1038 	tree = swap_zswap_tree(swpentry);
1039 	if (entry != xa_load(tree, offset)) {
1040 		ret = -ENOMEM;
1041 		goto out;
1042 	}
1043 
1044 	if (!zswap_decompress(entry, folio)) {
1045 		ret = -EIO;
1046 		goto out;
1047 	}
1048 
1049 	xa_erase(tree, offset);
1050 
1051 	count_vm_event(ZSWPWB);
1052 	if (entry->objcg)
1053 		count_objcg_events(entry->objcg, ZSWPWB, 1);
1054 
1055 	zswap_entry_free(entry);
1056 
1057 	/* folio is up to date */
1058 	folio_mark_uptodate(folio);
1059 
1060 	/* move it to the tail of the inactive list after end_writeback */
1061 	folio_set_reclaim(folio);
1062 
1063 	/* start writeback */
1064 	__swap_writepage(folio, NULL);
1065 
1066 out:
1067 	if (ret && ret != -EEXIST) {
1068 		swap_cache_del_folio(folio);
1069 		folio_unlock(folio);
1070 	}
1071 	folio_put(folio);
1072 	return ret;
1073 }
1074 
1075 /*********************************
1076 * shrinker functions
1077 **********************************/
1078 /*
1079  * The dynamic shrinker is modulated by the following factors:
1080  *
1081  * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
1082  *    the entry a second chance) before rotating it in the LRU list. If the
1083  *    entry is considered again by the shrinker, with its referenced bit unset,
1084  *    it is written back. The writeback rate as a result is dynamically
1085  *    adjusted by the pool activities - if the pool is dominated by new entries
1086  *    (i.e. lots of recent zswapouts), these entries will be protected and
1087  *    the writeback rate will slow down. On the other hand, if the pool has a
1088  *    lot of stagnant entries, these entries will be reclaimed immediately,
1089  *    effectively increasing the writeback rate.
1090  *
1091  * 2. Swapins counter: If we observe swapins, it is a sign that we are
1092  *    overshrinking and should slow down. We maintain a swapins counter, which
1093  *    is consumed and subtracted from the number of eligible objects on the LRU
1094  *    in zswap_shrinker_count().
1095  *
1096  * 3. Compression ratio. The better the workload compresses, the less gains we
1097  *    can expect from writeback. We scale down the number of objects available
1098  *    for reclaim by this ratio.
1099  */
1100 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1101 				       void *arg)
1102 {
1103 	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1104 	bool *encountered_page_in_swapcache = (bool *)arg;
1105 	swp_entry_t swpentry;
1106 	enum lru_status ret = LRU_REMOVED_RETRY;
1107 	int writeback_result;
1108 
1109 	/*
1110 	 * Second chance algorithm: if the entry has its referenced bit set, give it
1111 	 * a second chance. Only clear the referenced bit and rotate it in the
1112 	 * zswap's LRU list.
1113 	 */
1114 	if (entry->referenced) {
1115 		entry->referenced = false;
1116 		return LRU_ROTATE;
1117 	}
1118 
1119 	/*
1120 	 * As soon as we drop the LRU lock, the entry can be freed by
1121 	 * a concurrent invalidation. This means the following:
1122 	 *
1123 	 * 1. We extract the swp_entry_t to the stack, allowing
1124 	 *    zswap_writeback_entry() to pin the swap entry and
1125 	 *    then validate the zswap entry against that swap entry's
1126 	 *    tree using pointer value comparison. Only when that
1127 	 *    is successful can the entry be dereferenced.
1128 	 *
1129 	 * 2. Usually, objects are taken off the LRU for reclaim. In
1130 	 *    this case this isn't possible, because if reclaim fails
1131 	 *    for whatever reason, we have no means of knowing if the
1132 	 *    entry is alive to put it back on the LRU.
1133 	 *
1134 	 *    So rotate it before dropping the lock. If the entry is
1135 	 *    written back or invalidated, the free path will unlink
1136 	 *    it. For failures, rotation is the right thing as well.
1137 	 *
1138 	 *    Temporary failures, where the same entry should be tried
1139 	 *    again immediately, almost never happen for this shrinker.
1140 	 *    We don't do any trylocking; -ENOMEM comes closest,
1141 	 *    but that's extremely rare and doesn't happen spuriously
1142 	 *    either. Don't bother distinguishing this case.
1143 	 */
1144 	list_move_tail(item, &l->list);
1145 
1146 	/*
1147 	 * Once the lru lock is dropped, the entry might get freed. The
1148 	 * swpentry is copied to the stack, and entry isn't deref'd again
1149 	 * until the entry is verified to still be alive in the tree.
1150 	 */
1151 	swpentry = entry->swpentry;
1152 
1153 	/*
1154 	 * It's safe to drop the lock here because we return either
1155 	 * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP.
1156 	 */
1157 	spin_unlock(&l->lock);
1158 
1159 	writeback_result = zswap_writeback_entry(entry, swpentry);
1160 
1161 	if (writeback_result) {
1162 		zswap_reject_reclaim_fail++;
1163 		ret = LRU_RETRY;
1164 
1165 		/*
1166 		 * Encountering a page already in swap cache is a sign that we are shrinking
1167 		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1168 		 * shrinker context).
1169 		 */
1170 		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1171 			ret = LRU_STOP;
1172 			*encountered_page_in_swapcache = true;
1173 		}
1174 	} else {
1175 		zswap_written_back_pages++;
1176 	}
1177 
1178 	return ret;
1179 }
1180 
1181 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1182 		struct shrink_control *sc)
1183 {
1184 	unsigned long shrink_ret;
1185 	bool encountered_page_in_swapcache = false;
1186 
1187 	if (!zswap_shrinker_enabled ||
1188 			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1189 		sc->nr_scanned = 0;
1190 		return SHRINK_STOP;
1191 	}
1192 
1193 	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1194 		&encountered_page_in_swapcache);
1195 
1196 	if (encountered_page_in_swapcache)
1197 		return SHRINK_STOP;
1198 
1199 	return shrink_ret ? shrink_ret : SHRINK_STOP;
1200 }
1201 
1202 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1203 		struct shrink_control *sc)
1204 {
1205 	struct mem_cgroup *memcg = sc->memcg;
1206 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1207 	atomic_long_t *nr_disk_swapins =
1208 		&lruvec->zswap_lruvec_state.nr_disk_swapins;
1209 	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
1210 		nr_remain;
1211 
1212 	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1213 		return 0;
1214 
1215 	/*
1216 	 * The shrinker resumes swap writeback, which will enter block
1217 	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1218 	 * rules (may_enter_fs()), which apply on a per-folio basis.
1219 	 */
1220 	if (!gfp_has_io_fs(sc->gfp_mask))
1221 		return 0;
1222 
1223 	/*
1224 	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1225 	 * have them per-node and thus per-lruvec. Careful if memcg is
1226 	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1227 	 * for the lruvec, but not for memcg_page_state().
1228 	 *
1229 	 * Without memcg, use the zswap pool-wide metrics.
1230 	 */
1231 	if (!mem_cgroup_disabled()) {
1232 		mem_cgroup_flush_stats(memcg);
1233 		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1234 		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1235 	} else {
1236 		nr_backing = zswap_total_pages();
1237 		nr_stored = atomic_long_read(&zswap_stored_pages);
1238 	}
1239 
1240 	if (!nr_stored)
1241 		return 0;
1242 
1243 	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1244 	if (!nr_freeable)
1245 		return 0;
1246 
1247 	/*
1248 	 * Subtract from the lru size the number of pages that were recently swapped
1249 	 * in from disk. The idea is that had we protected zswap's LRU by this
1250 	 * amount of pages, these disk swapins would not have happened.
1251 	 */
1252 	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
1253 	do {
1254 		if (nr_freeable >= nr_disk_swapins_cur)
1255 			nr_remain = 0;
1256 		else
1257 			nr_remain = nr_disk_swapins_cur - nr_freeable;
1258 	} while (!atomic_long_try_cmpxchg(
1259 		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));
1260 
1261 	nr_freeable -= nr_disk_swapins_cur - nr_remain;
1262 	if (!nr_freeable)
1263 		return 0;
1264 
1265 	/*
1266 	 * Scale the number of freeable pages by the memory saving factor.
1267 	 * This ensures that the better zswap compresses memory, the fewer
1268 	 * pages we will evict to swap (as it will otherwise incur IO for
1269 	 * relatively small memory saving).
1270 	 */
1271 	return mult_frac(nr_freeable, nr_backing, nr_stored);
1272 }
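
/*
 * Worked example (illustrative): if the LRU reports 1000 freeable
 * entries, and the memcg has 1000 stored pages backed by 250 pages of
 * compressed pool memory (a 4:1 ratio), the shrinker advertises
 * mult_frac(1000, 250, 1000) == 250 objects, so a well-compressing
 * workload is shrunk correspondingly less aggressively.
 */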
1273 
1274 static struct shrinker *zswap_alloc_shrinker(void)
1275 {
1276 	struct shrinker *shrinker;
1277 
1278 	shrinker =
1279 		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1280 	if (!shrinker)
1281 		return NULL;
1282 
1283 	shrinker->scan_objects = zswap_shrinker_scan;
1284 	shrinker->count_objects = zswap_shrinker_count;
1285 	shrinker->batch = 0;
1286 	shrinker->seeks = DEFAULT_SEEKS;
1287 	return shrinker;
1288 }
1289 
1290 static int shrink_memcg(struct mem_cgroup *memcg)
1291 {
1292 	int nid, shrunk = 0, scanned = 0;
1293 
1294 	if (!mem_cgroup_zswap_writeback_enabled(memcg))
1295 		return -ENOENT;
1296 
1297 	/*
1298 	 * Skip zombies because their LRUs are reparented and we would be
1299 	 * reclaiming from the parent instead of the dead memcg.
1300 	 */
1301 	if (memcg && !mem_cgroup_online(memcg))
1302 		return -ENOENT;
1303 
1304 	for_each_node_state(nid, N_NORMAL_MEMORY) {
1305 		unsigned long nr_to_walk = 1;
1306 
1307 		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1308 					    &shrink_memcg_cb, NULL, &nr_to_walk);
1309 		scanned += 1 - nr_to_walk;
1310 	}
1311 
1312 	if (!scanned)
1313 		return -ENOENT;
1314 
1315 	return shrunk ? 0 : -EAGAIN;
1316 }
1317 
1318 static void shrink_worker(struct work_struct *w)
1319 {
1320 	struct mem_cgroup *memcg;
1321 	int ret, failures = 0, attempts = 0;
1322 	unsigned long thr;
1323 
1324 	/* Reclaim down to the accept threshold */
1325 	thr = zswap_accept_thr_pages();
1326 
1327 	/*
1328 	 * Global reclaim will select cgroup in a round-robin fashion from all
1329 	 * online memcgs, but memcgs that have no pages in zswap and
1330 	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
1331 	 * candidates for shrinking.
1332 	 *
1333 	 * Shrinking will be aborted if we encounter the following
1334 	 * MAX_RECLAIM_RETRIES times:
1335 	 * - No writeback-candidate memcgs found in a memcg tree walk.
1336 	 * - Shrinking a writeback-candidate memcg failed.
1337 	 *
1338 	 * We save iteration cursor memcg into zswap_next_shrink,
1339 	 * which can be modified by the offline memcg cleaner
1340 	 * zswap_memcg_offline_cleanup().
1341 	 *
1342 	 * Since the offline cleaner is called only once, we cannot leave an
1343 	 * offline memcg reference in zswap_next_shrink.
1344 	 * We can rely on the cleaner only if we get online memcg under lock.
1345 	 *
1346 	 * If we get an offline memcg, we cannot determine if the cleaner has
1347 	 * already been called or will be called later. We must put back the
1348 	 * reference before returning from this function. Otherwise, the
1349 	 * offline memcg left in zswap_next_shrink will hold the reference
1350 	 * until the next run of shrink_worker().
1351 	 */
1352 	do {
1353 		/*
1354 		 * Start shrinking from the next memcg after zswap_next_shrink.
1355 		 * When the offline cleaner has already advanced the cursor,
1356 		 * advancing the cursor here overlooks one memcg, but this
1357 		 * should be negligibly rare.
1358 		 *
1359 		 * If we get an online memcg, keep the extra reference in case
1360 		 * the original one obtained by mem_cgroup_iter() is dropped by
1361 		 * zswap_memcg_offline_cleanup() while we are shrinking the
1362 		 * memcg.
1363 		 */
1364 		spin_lock(&zswap_shrink_lock);
1365 		do {
1366 			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1367 			zswap_next_shrink = memcg;
1368 		} while (memcg && !mem_cgroup_tryget_online(memcg));
1369 		spin_unlock(&zswap_shrink_lock);
1370 
1371 		if (!memcg) {
1372 			/*
1373 			 * Continue shrinking without incrementing failures if
1374 			 * we found candidate memcgs in the last tree walk.
1375 			 */
1376 			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
1377 				break;
1378 
1379 			attempts = 0;
1380 			goto resched;
1381 		}
1382 
1383 		ret = shrink_memcg(memcg);
1384 		/* drop the extra reference */
1385 		mem_cgroup_put(memcg);
1386 
1387 		/*
1388 		 * There are no writeback-candidate pages in the memcg.
1389 		 * This is not an issue as long as we can find another memcg
1390 		 * with pages in zswap. Skip this without incrementing attempts
1391 		 * and failures.
1392 		 */
1393 		if (ret == -ENOENT)
1394 			continue;
1395 		++attempts;
1396 
1397 		if (ret && ++failures == MAX_RECLAIM_RETRIES)
1398 			break;
1399 resched:
1400 		cond_resched();
1401 	} while (zswap_total_pages() > thr);
1402 }
1403 
1404 /*********************************
1405 * main API
1406 **********************************/
1407 
1408 static bool zswap_store_page(struct page *page,
1409 			     struct obj_cgroup *objcg,
1410 			     struct zswap_pool *pool)
1411 {
1412 	swp_entry_t page_swpentry = page_swap_entry(page);
1413 	struct zswap_entry *entry, *old;
1414 
1415 	/* allocate entry */
1416 	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
1417 	if (!entry) {
1418 		zswap_reject_kmemcache_fail++;
1419 		return false;
1420 	}
1421 
1422 	if (!zswap_compress(page, entry, pool))
1423 		goto compress_failed;
1424 
1425 	old = xa_store(swap_zswap_tree(page_swpentry),
1426 		       swp_offset(page_swpentry),
1427 		       entry, GFP_KERNEL);
1428 	if (xa_is_err(old)) {
1429 		int err = xa_err(old);
1430 
1431 		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
1432 		zswap_reject_alloc_fail++;
1433 		goto store_failed;
1434 	}
1435 
1436 	/*
1437 	 * We may have had an existing entry that became stale when
1438 	 * the folio was redirtied and now the new version is being
1439 	 * swapped out. Get rid of the old.
1440 	 */
1441 	if (old)
1442 		zswap_entry_free(old);
1443 
1444 	/*
1445 	 * The entry is successfully compressed and stored in the tree, there is
1446 	 * no further possibility of failure. Grab refs to the pool and objcg,
1447 	 * charge zswap memory, and increment zswap_stored_pages.
1448 	 * The opposite actions will be performed by zswap_entry_free()
1449 	 * when the entry is removed from the tree.
1450 	 */
1451 	zswap_pool_get(pool);
1452 	if (objcg) {
1453 		obj_cgroup_get(objcg);
1454 		obj_cgroup_charge_zswap(objcg, entry->length);
1455 	}
1456 	atomic_long_inc(&zswap_stored_pages);
1457 	if (entry->length == PAGE_SIZE)
1458 		atomic_long_inc(&zswap_stored_incompressible_pages);
1459 
1460 	/*
1461 	 * We finish initializing the entry while it's already in xarray.
1462 	 * This is safe because:
1463 	 *
1464 	 * 1. Concurrent stores and invalidations are excluded by folio lock.
1465 	 *
1466 	 * 2. Writeback is excluded by the entry not being on the LRU yet.
1467 	 *    The publishing order matters to prevent writeback from seeing
1468 	 *    an incoherent entry.
1469 	 */
1470 	entry->pool = pool;
1471 	entry->swpentry = page_swpentry;
1472 	entry->objcg = objcg;
1473 	entry->referenced = true;
1474 	if (entry->length) {
1475 		INIT_LIST_HEAD(&entry->lru);
1476 		zswap_lru_add(&zswap_list_lru, entry);
1477 	}
1478 
1479 	return true;
1480 
1481 store_failed:
1482 	zs_free(pool->zs_pool, entry->handle);
1483 compress_failed:
1484 	zswap_entry_cache_free(entry);
1485 	return false;
1486 }
1487 
1488 bool zswap_store(struct folio *folio)
1489 {
1490 	long nr_pages = folio_nr_pages(folio);
1491 	swp_entry_t swp = folio->swap;
1492 	struct obj_cgroup *objcg = NULL;
1493 	struct mem_cgroup *memcg = NULL;
1494 	struct zswap_pool *pool;
1495 	bool ret = false;
1496 	long index;
1497 
1498 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1499 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1500 
1501 	if (!zswap_enabled)
1502 		goto check_old;
1503 
1504 	objcg = get_obj_cgroup_from_folio(folio);
1505 	if (objcg && !obj_cgroup_may_zswap(objcg)) {
1506 		memcg = get_mem_cgroup_from_objcg(objcg);
1507 		if (shrink_memcg(memcg)) {
1508 			mem_cgroup_put(memcg);
1509 			goto put_objcg;
1510 		}
1511 		mem_cgroup_put(memcg);
1512 	}
1513 
1514 	if (zswap_check_limits())
1515 		goto put_objcg;
1516 
1517 	pool = zswap_pool_current_get();
1518 	if (!pool)
1519 		goto put_objcg;
1520 
1521 	if (objcg) {
1522 		memcg = get_mem_cgroup_from_objcg(objcg);
1523 		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1524 			mem_cgroup_put(memcg);
1525 			goto put_pool;
1526 		}
1527 		mem_cgroup_put(memcg);
1528 	}
1529 
1530 	for (index = 0; index < nr_pages; ++index) {
1531 		struct page *page = folio_page(folio, index);
1532 
1533 		if (!zswap_store_page(page, objcg, pool))
1534 			goto put_pool;
1535 	}
1536 
1537 	if (objcg)
1538 		count_objcg_events(objcg, ZSWPOUT, nr_pages);
1539 
1540 	count_vm_events(ZSWPOUT, nr_pages);
1541 
1542 	ret = true;
1543 
1544 put_pool:
1545 	zswap_pool_put(pool);
1546 put_objcg:
1547 	obj_cgroup_put(objcg);
1548 	if (!ret && zswap_pool_reached_full)
1549 		queue_work(shrink_wq, &zswap_shrink_work);
1550 check_old:
1551 	/*
1552 	 * If the zswap store fails or zswap is disabled, we must invalidate
1553 	 * the possibly stale entries which were previously stored at the
1554 	 * offsets corresponding to each page of the folio. Otherwise,
1555 	 * writeback could overwrite the new data in the swapfile.
1556 	 */
1557 	if (!ret) {
1558 		unsigned type = swp_type(swp);
1559 		pgoff_t offset = swp_offset(swp);
1560 		struct zswap_entry *entry;
1561 		struct xarray *tree;
1562 
1563 		for (index = 0; index < nr_pages; ++index) {
1564 			tree = swap_zswap_tree(swp_entry(type, offset + index));
1565 			entry = xa_erase(tree, offset + index);
1566 			if (entry)
1567 				zswap_entry_free(entry);
1568 		}
1569 	}
1570 
1571 	return ret;
1572 }
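
/*
 * Illustrative call-site sketch (not part of this file): the swap-out
 * path, e.g. swap_writeout(), is expected to try zswap_store() first
 * and only issue device I/O when it fails, roughly:
 *
 *	if (zswap_store(folio))
 *		folio_unlock(folio);	(folio fully captured in zswap)
 *	else
 *		write the folio to the backing swap device
 */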
1573 
1574 /**
1575  * zswap_load() - load a folio from zswap
1576  * @folio: folio to load
1577  *
1578  * Return: 0 on success, with the folio unlocked and marked up-to-date, or one
1579  * of the following error codes:
1580  *
1581  *  -EIO: if the swapped out content was in zswap, but could not be loaded
1582  *  into the page due to a decompression failure. The folio is unlocked, but
1583  *  NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page()
1584  *  will SIGBUS).
1585  *
1586  *  -EINVAL: if the swapped out content was in zswap, but the page belongs
1587  *  to a large folio, which is not supported by zswap. The folio is unlocked,
1588  *  but NOT marked up-to-date, so that an IO error is emitted (e.g.
1589  *  do_swap_page() will SIGBUS).
1590  *
1591  *  -ENOENT: if the swapped out content was not in zswap. The folio remains
1592  *  locked on return.
1593  */
1594 int zswap_load(struct folio *folio)
1595 {
1596 	swp_entry_t swp = folio->swap;
1597 	pgoff_t offset = swp_offset(swp);
1598 	bool swapcache = folio_test_swapcache(folio);
1599 	struct xarray *tree = swap_zswap_tree(swp);
1600 	struct zswap_entry *entry;
1601 
1602 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1603 
1604 	if (zswap_never_enabled())
1605 		return -ENOENT;
1606 
1607 	/*
1608 	 * Large folios should not be swapped in while zswap is being used, as
1609 	 * they are not properly handled. Zswap does not properly load large
1610 	 * folios, and a large folio may only be partially in zswap.
1611 	 */
1612 	if (WARN_ON_ONCE(folio_test_large(folio))) {
1613 		folio_unlock(folio);
1614 		return -EINVAL;
1615 	}
1616 
1617 	entry = xa_load(tree, offset);
1618 	if (!entry)
1619 		return -ENOENT;
1620 
1621 	if (!zswap_decompress(entry, folio)) {
1622 		folio_unlock(folio);
1623 		return -EIO;
1624 	}
1625 
1626 	folio_mark_uptodate(folio);
1627 
1628 	count_vm_event(ZSWPIN);
1629 	if (entry->objcg)
1630 		count_objcg_events(entry->objcg, ZSWPIN, 1);
1631 
1632 	/*
1633 	 * When reading into the swapcache, invalidate our entry. The
1634 	 * swapcache can be the authoritative owner of the page and
1635 	 * its mappings, and the pressure that results from having two
1636 	 * in-memory copies outweighs any benefits of caching the
1637 	 * compression work.
1638 	 *
1639 	 * (Most swapins go through the swapcache. The notable
1640 	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
1641 	 * files, which reads into a private page and may free it if
1642 	 * the fault fails. We remain the primary owner of the entry.)
1643 	 */
1644 	if (swapcache) {
1645 		folio_mark_dirty(folio);
1646 		xa_erase(tree, offset);
1647 		zswap_entry_free(entry);
1648 	}
1649 
1650 	folio_unlock(folio);
1651 	return 0;
1652 }
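
/*
 * Illustrative caller sketch (not part of this file): the swap-in path
 * is expected to dispatch on the return value roughly as follows:
 *
 *	ret = zswap_load(folio);
 *	if (ret == 0)
 *		done, the folio is unlocked and uptodate
 *	else if (ret == -ENOENT)
 *		read the folio from the swap device (folio still locked)
 *	else
 *		-EIO/-EINVAL: folio unlocked but not uptodate, which
 *		surfaces as an I/O error to the fault handler
 */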
1653 
1654 void zswap_invalidate(swp_entry_t swp)
1655 {
1656 	pgoff_t offset = swp_offset(swp);
1657 	struct xarray *tree = swap_zswap_tree(swp);
1658 	struct zswap_entry *entry;
1659 
1660 	if (xa_empty(tree))
1661 		return;
1662 
1663 	entry = xa_erase(tree, offset);
1664 	if (entry)
1665 		zswap_entry_free(entry);
1666 }
1667 
1668 int zswap_swapon(int type, unsigned long nr_pages)
1669 {
1670 	struct xarray *trees, *tree;
1671 	unsigned int nr, i;
1672 
1673 	nr = DIV_ROUND_UP(nr_pages, ZSWAP_ADDRESS_SPACE_PAGES);
1674 	trees = kvzalloc_objs(*tree, nr);
1675 	if (!trees) {
1676 		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1677 		return -ENOMEM;
1678 	}
1679 
1680 	for (i = 0; i < nr; i++)
1681 		xa_init(trees + i);
1682 
1683 	nr_zswap_trees[type] = nr;
1684 	zswap_trees[type] = trees;
1685 	return 0;
1686 }
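
/*
 * Worked example (illustrative): swapon of a 1 GiB device (262144
 * pages) allocates DIV_ROUND_UP(262144, 16384) == 16 xarrays, one per
 * 64 MiB chunk of the swap space.
 */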
1687 
1688 void zswap_swapoff(int type)
1689 {
1690 	struct xarray *trees = zswap_trees[type];
1691 	unsigned int i;
1692 
1693 	if (!trees)
1694 		return;
1695 
1696 	/* try_to_unuse() invalidated all the entries already */
1697 	for (i = 0; i < nr_zswap_trees[type]; i++)
1698 		WARN_ON_ONCE(!xa_empty(trees + i));
1699 
1700 	kvfree(trees);
1701 	nr_zswap_trees[type] = 0;
1702 	zswap_trees[type] = NULL;
1703 }
1704 
1705 /*********************************
1706 * debugfs functions
1707 **********************************/
1708 #ifdef CONFIG_DEBUG_FS
1709 #include <linux/debugfs.h>
1710 
1711 static struct dentry *zswap_debugfs_root;
1712 
1713 static int debugfs_get_total_size(void *data, u64 *val)
1714 {
1715 	*val = zswap_total_pages() * PAGE_SIZE;
1716 	return 0;
1717 }
1718 DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
1719 
1720 static int debugfs_get_stored_pages(void *data, u64 *val)
1721 {
1722 	*val = atomic_long_read(&zswap_stored_pages);
1723 	return 0;
1724 }
1725 DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");
1726 
1727 static int debugfs_get_stored_incompressible_pages(void *data, u64 *val)
1728 {
1729 	*val = atomic_long_read(&zswap_stored_incompressible_pages);
1730 	return 0;
1731 }
1732 DEFINE_DEBUGFS_ATTRIBUTE(stored_incompressible_pages_fops,
1733 		debugfs_get_stored_incompressible_pages, NULL, "%llu\n");
1734 
1735 static int zswap_debugfs_init(void)
1736 {
1737 	if (!debugfs_initialized())
1738 		return -ENODEV;
1739 
1740 	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1741 
1742 	debugfs_create_u64("pool_limit_hit", 0444,
1743 			   zswap_debugfs_root, &zswap_pool_limit_hit);
1744 	debugfs_create_u64("reject_reclaim_fail", 0444,
1745 			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
1746 	debugfs_create_u64("reject_alloc_fail", 0444,
1747 			   zswap_debugfs_root, &zswap_reject_alloc_fail);
1748 	debugfs_create_u64("reject_kmemcache_fail", 0444,
1749 			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1750 	debugfs_create_u64("reject_compress_fail", 0444,
1751 			   zswap_debugfs_root, &zswap_reject_compress_fail);
1752 	debugfs_create_u64("reject_compress_poor", 0444,
1753 			   zswap_debugfs_root, &zswap_reject_compress_poor);
1754 	debugfs_create_u64("decompress_fail", 0444,
1755 			   zswap_debugfs_root, &zswap_decompress_fail);
1756 	debugfs_create_u64("written_back_pages", 0444,
1757 			   zswap_debugfs_root, &zswap_written_back_pages);
1758 	debugfs_create_file("pool_total_size", 0444,
1759 			    zswap_debugfs_root, NULL, &total_size_fops);
1760 	debugfs_create_file("stored_pages", 0444,
1761 			    zswap_debugfs_root, NULL, &stored_pages_fops);
1762 	debugfs_create_file("stored_incompressible_pages", 0444,
1763 			    zswap_debugfs_root, NULL,
1764 			    &stored_incompressible_pages_fops);
1765 
1766 	return 0;
1767 }
1768 #else
1769 static int zswap_debugfs_init(void)
1770 {
1771 	return 0;
1772 }
1773 #endif
1774 
1775 /*********************************
1776 * module init and exit
1777 **********************************/
1778 static int zswap_setup(void)
1779 {
1780 	struct zswap_pool *pool;
1781 	int ret;
1782 
1783 	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1784 	if (!zswap_entry_cache) {
1785 		pr_err("entry cache creation failed\n");
1786 		goto cache_fail;
1787 	}
1788 
1789 	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1790 				      "mm/zswap_pool:prepare",
1791 				      zswap_cpu_comp_prepare,
1792 				      zswap_cpu_comp_dead);
1793 	if (ret)
1794 		goto hp_fail;
1795 
1796 	shrink_wq = alloc_workqueue("zswap-shrink",
1797 			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1798 	if (!shrink_wq)
1799 		goto shrink_wq_fail;
1800 
1801 	zswap_shrinker = zswap_alloc_shrinker();
1802 	if (!zswap_shrinker)
1803 		goto shrinker_fail;
1804 	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1805 		goto lru_fail;
1806 	shrinker_register(zswap_shrinker);
1807 
1808 	INIT_WORK(&zswap_shrink_work, shrink_worker);
1809 
1810 	pool = __zswap_pool_create_fallback();
1811 	if (pool) {
1812 		pr_info("loaded using pool %s\n", pool->tfm_name);
1813 		list_add(&pool->list, &zswap_pools);
1814 		zswap_has_pool = true;
1815 		static_branch_enable(&zswap_ever_enabled);
1816 	} else {
1817 		pr_err("pool creation failed\n");
1818 		zswap_enabled = false;
1819 	}
1820 
1821 	if (zswap_debugfs_init())
1822 		pr_warn("debugfs initialization failed\n");
1823 	zswap_init_state = ZSWAP_INIT_SUCCEED;
1824 	return 0;
1825 
1826 lru_fail:
1827 	shrinker_free(zswap_shrinker);
1828 shrinker_fail:
1829 	destroy_workqueue(shrink_wq);
1830 shrink_wq_fail:
1831 	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1832 hp_fail:
1833 	kmem_cache_destroy(zswap_entry_cache);
1834 cache_fail:
1835 	/* if built-in, we aren't unloaded on failure; don't allow use */
1836 	zswap_init_state = ZSWAP_INIT_FAILED;
1837 	zswap_enabled = false;
1838 	return -ENOMEM;
1839 }
1840 
1841 static int __init zswap_init(void)
1842 {
1843 	if (!zswap_enabled)
1844 		return 0;
1845 	return zswap_setup();
1846 }
1847 /* must be late so crypto has time to come up */
1848 late_initcall(zswap_init);
1849 
1850 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1851 MODULE_DESCRIPTION("Compressed cache for swap pages");
1852