xref: /linux/mm/zswap.c (revision 9fffa4e9b3b158f63334e603e610da7d529a0f9a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * zswap.c - zswap driver file
4  *
5  * zswap is a cache that takes pages that are in the process
6  * of being swapped out and attempts to compress and store them in a
7  * RAM-based memory pool.  This can result in a significant I/O reduction on
8  * the swap device and, in the case where decompressing from RAM is faster
9  * than reading from the swap device, can also improve workload performance.
10  *
11  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
12 */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/highmem.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/swap.h>
24 #include <linux/crypto.h>
25 #include <linux/scatterlist.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mempool.h>
28 #include <linux/zpool.h>
29 #include <crypto/acompress.h>
30 #include <linux/zswap.h>
31 #include <linux/mm_types.h>
32 #include <linux/page-flags.h>
33 #include <linux/swapops.h>
34 #include <linux/writeback.h>
35 #include <linux/pagemap.h>
36 #include <linux/workqueue.h>
37 #include <linux/list_lru.h>
38 
39 #include "swap.h"
40 #include "internal.h"
41 
42 /*********************************
43 * statistics
44 **********************************/
45 /* The number of compressed pages currently stored in zswap */
46 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
47 
48 /*
49  * The statistics below are not protected from concurrent access for
50  * performance reasons, so they may not be 100% accurate.  However,
51  * they do provide useful information on roughly how many times a
52  * certain event is occurring.
53 */
54 
55 /* Pool limit was hit (see zswap_max_pool_percent) */
56 static u64 zswap_pool_limit_hit;
57 /* Pages written back when pool limit was reached */
58 static u64 zswap_written_back_pages;
59 /* Store failed due to a reclaim failure after pool limit was reached */
60 static u64 zswap_reject_reclaim_fail;
61 /* Store failed due to compression algorithm failure */
62 static u64 zswap_reject_compress_fail;
63 /* Compressed page was too big for the allocator to (optimally) store */
64 static u64 zswap_reject_compress_poor;
65 /* Store failed because underlying allocator could not get memory */
66 static u64 zswap_reject_alloc_fail;
67 /* Store failed because the entry metadata could not be allocated (rare) */
68 static u64 zswap_reject_kmemcache_fail;
69 
70 /* Shrinker work queue */
71 static struct workqueue_struct *shrink_wq;
72 /* Pool limit was hit, we need to calm down */
73 static bool zswap_pool_reached_full;
74 
75 /*********************************
76 * tunables
77 **********************************/
78 
79 #define ZSWAP_PARAM_UNSET ""
80 
81 static int zswap_setup(void);
82 
83 /* Enable/disable zswap */
84 static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
85 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
86 static int zswap_enabled_param_set(const char *,
87 				   const struct kernel_param *);
88 static const struct kernel_param_ops zswap_enabled_param_ops = {
89 	.set =		zswap_enabled_param_set,
90 	.get =		param_get_bool,
91 };
92 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
93 
94 /* Crypto compressor to use */
95 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
96 static int zswap_compressor_param_set(const char *,
97 				      const struct kernel_param *);
98 static const struct kernel_param_ops zswap_compressor_param_ops = {
99 	.set =		zswap_compressor_param_set,
100 	.get =		param_get_charp,
101 	.free =		param_free_charp,
102 };
103 module_param_cb(compressor, &zswap_compressor_param_ops,
104 		&zswap_compressor, 0644);
105 
106 /* Compressed storage zpool to use */
107 static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
108 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
109 static const struct kernel_param_ops zswap_zpool_param_ops = {
110 	.set =		zswap_zpool_param_set,
111 	.get =		param_get_charp,
112 	.free =		param_free_charp,
113 };
114 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
115 
116 /* The maximum percentage of memory that the compressed pool can occupy */
117 static unsigned int zswap_max_pool_percent = 20;
118 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
119 
120 /* The threshold for accepting new pages after the max_pool_percent was hit */
121 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
122 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
123 		   uint, 0644);
124 
125 /* Enable/disable memory pressure-based shrinker. */
126 static bool zswap_shrinker_enabled = IS_ENABLED(
127 		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
128 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
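
/*
 * All of the above parameters are exposed at runtime under
 * /sys/module/zswap/parameters/ and can also be set at boot time as
 * zswap.<name>= on the kernel command line; see
 * Documentation/admin-guide/mm/zswap.rst for details.
 */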
129 
130 bool zswap_is_enabled(void)
131 {
132 	return zswap_enabled;
133 }
134 
135 bool zswap_never_enabled(void)
136 {
137 	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
138 }
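
/*
 * zswap_never_enabled() backs a static-branch fast path: if zswap has
 * never been switched on since boot, zswap_load() below can bail out
 * immediately, so swap-in pays essentially nothing for a zswap that was
 * never used.
 */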
139 
140 /*********************************
141 * data structures
142 **********************************/
143 
144 struct crypto_acomp_ctx {
145 	struct crypto_acomp *acomp;
146 	struct acomp_req *req;
147 	struct crypto_wait wait;
148 	u8 *buffer;
149 	struct mutex mutex;
150 	bool is_sleepable;
151 };
152 
153 /*
154  * The lock ordering is the zswap tree (xarray) lock -> the zswap LRU (list_lru) lock.
155  * The only case where the LRU lock is not acquired while holding the tree lock is
156  * when a zswap_entry is taken off the LRU for writeback; in that case it must
157  * be verified that the entry is still valid in the tree.
158  */
159 struct zswap_pool {
160 	struct zpool *zpool;
161 	struct crypto_acomp_ctx __percpu *acomp_ctx;
162 	struct percpu_ref ref;
163 	struct list_head list;
164 	struct work_struct release_work;
165 	struct hlist_node node;
166 	char tfm_name[CRYPTO_MAX_ALG_NAME];
167 };
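
/*
 * A zswap_pool couples one compressed-memory backend (zpool) with per-CPU
 * compression contexts for a single algorithm (tfm_name). Pools live on
 * the RCU-protected zswap_pools list and are reference-counted with a
 * percpu_ref, so a pool that is replaced at runtime lingers only until
 * its last stored entry is freed.
 */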
168 
169 /* Global LRU lists shared by all zswap pools. */
170 static struct list_lru zswap_list_lru;
171 
172 /* The lock protects zswap_next_shrink updates. */
173 static DEFINE_SPINLOCK(zswap_shrink_lock);
174 static struct mem_cgroup *zswap_next_shrink;
175 static struct work_struct zswap_shrink_work;
176 static struct shrinker *zswap_shrinker;
177 
178 /*
179  * struct zswap_entry
180  *
181  * This structure contains the metadata for tracking a single compressed
182  * page within zswap.
183  *
184  * swpentry - associated swap entry, the offset indexes into the xarray
185  * length - the length in bytes of the compressed page data.  Needed during
186  *          decompression.
187  * referenced - true if the entry recently entered the zswap pool. Unset by the
188  *              writeback logic. The entry is only reclaimed by the writeback
189  *              logic if referenced is unset. See comments in the shrinker
190  *              section for context.
191  * pool - the zswap_pool the entry's data is in
192  * handle - zpool allocation handle that stores the compressed page data
194  * objcg - the obj_cgroup that the compressed memory is charged to
195  * lru - handle to the pool's lru used to evict pages.
196  */
197 struct zswap_entry {
198 	swp_entry_t swpentry;
199 	unsigned int length;
200 	bool referenced;
201 	struct zswap_pool *pool;
202 	unsigned long handle;
203 	struct obj_cgroup *objcg;
204 	struct list_head lru;
205 };
206 
207 static struct xarray *zswap_trees[MAX_SWAPFILES];
208 static unsigned int nr_zswap_trees[MAX_SWAPFILES];
209 
210 /* RCU-protected iteration */
211 static LIST_HEAD(zswap_pools);
212 /* protects zswap_pools list modification */
213 static DEFINE_SPINLOCK(zswap_pools_lock);
214 /* pool counter to provide unique names to zpool */
215 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
216 
217 enum zswap_init_type {
218 	ZSWAP_UNINIT,
219 	ZSWAP_INIT_SUCCEED,
220 	ZSWAP_INIT_FAILED
221 };
222 
223 static enum zswap_init_type zswap_init_state;
224 
225 /* used to ensure the integrity of initialization */
226 static DEFINE_MUTEX(zswap_init_lock);
227 
228 /* init completed, but couldn't create the initial pool */
229 static bool zswap_has_pool;
230 
231 /*********************************
232 * helpers and fwd declarations
233 **********************************/
234 
235 static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
236 {
237 	return &zswap_trees[swp_type(swp)][swp_offset(swp)
238 		>> SWAP_ADDRESS_SPACE_SHIFT];
239 }
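
/*
 * Each swapfile is covered by an array of xarrays rather than one big
 * tree: swp_offset() >> SWAP_ADDRESS_SPACE_SHIFT selects the tree, so
 * nearby offsets share a tree (and its lock) while distant offsets land
 * in independent trees.
 */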
240 
241 #define zswap_pool_debug(msg, p)				\
242 	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
243 		 zpool_get_type((p)->zpool))
244 
245 /*********************************
246 * pool functions
247 **********************************/
248 static void __zswap_pool_empty(struct percpu_ref *ref);
249 
250 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
251 {
252 	struct zswap_pool *pool;
253 	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
254 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
255 	int ret;
256 
257 	if (!zswap_has_pool) {
258 		/* if either is unset, pool initialization failed, and we
259 		 * need both params to be set correctly before trying to
260 		 * create a pool.
261 		 */
262 		if (!strcmp(type, ZSWAP_PARAM_UNSET))
263 			return NULL;
264 		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
265 			return NULL;
266 	}
267 
268 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
269 	if (!pool)
270 		return NULL;
271 
272 	/* unique name for each pool specifically required by zsmalloc */
273 	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
274 	pool->zpool = zpool_create_pool(type, name, gfp);
275 	if (!pool->zpool) {
276 		pr_err("%s zpool not available\n", type);
277 		goto error;
278 	}
279 	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
280 
281 	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
282 
283 	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
284 	if (!pool->acomp_ctx) {
285 		pr_err("percpu alloc failed\n");
286 		goto error;
287 	}
288 
289 	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
290 				       &pool->node);
291 	if (ret)
292 		goto error;
293 
294 	/* being the current pool takes 1 ref; this func expects the
295 	 * caller to always add the new pool as the current pool
296 	 */
297 	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
298 			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
299 	if (ret)
300 		goto ref_fail;
301 	INIT_LIST_HEAD(&pool->list);
302 
303 	zswap_pool_debug("created", pool);
304 
305 	return pool;
306 
307 ref_fail:
308 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
309 error:
310 	if (pool->acomp_ctx)
311 		free_percpu(pool->acomp_ctx);
312 	if (pool->zpool)
313 		zpool_destroy_pool(pool->zpool);
314 	kfree(pool);
315 	return NULL;
316 }
317 
318 static struct zswap_pool *__zswap_pool_create_fallback(void)
319 {
320 	bool has_comp, has_zpool;
321 
322 	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
323 	if (!has_comp && strcmp(zswap_compressor,
324 				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
325 		pr_err("compressor %s not available, using default %s\n",
326 		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
327 		param_free_charp(&zswap_compressor);
328 		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
329 		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
330 	}
331 	if (!has_comp) {
332 		pr_err("default compressor %s not available\n",
333 		       zswap_compressor);
334 		param_free_charp(&zswap_compressor);
335 		zswap_compressor = ZSWAP_PARAM_UNSET;
336 	}
337 
338 	has_zpool = zpool_has_pool(zswap_zpool_type);
339 	if (!has_zpool && strcmp(zswap_zpool_type,
340 				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
341 		pr_err("zpool %s not available, using default %s\n",
342 		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
343 		param_free_charp(&zswap_zpool_type);
344 		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
345 		has_zpool = zpool_has_pool(zswap_zpool_type);
346 	}
347 	if (!has_zpool) {
348 		pr_err("default zpool %s not available\n",
349 		       zswap_zpool_type);
350 		param_free_charp(&zswap_zpool_type);
351 		zswap_zpool_type = ZSWAP_PARAM_UNSET;
352 	}
353 
354 	if (!has_comp || !has_zpool)
355 		return NULL;
356 
357 	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
358 }
359 
360 static void zswap_pool_destroy(struct zswap_pool *pool)
361 {
362 	zswap_pool_debug("destroying", pool);
363 
364 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
365 	free_percpu(pool->acomp_ctx);
366 
367 	zpool_destroy_pool(pool->zpool);
368 	kfree(pool);
369 }
370 
371 static void __zswap_pool_release(struct work_struct *work)
372 {
373 	struct zswap_pool *pool = container_of(work, typeof(*pool),
374 						release_work);
375 
376 	synchronize_rcu();
377 
378 	/* nobody should have been able to get a ref... */
379 	WARN_ON(!percpu_ref_is_zero(&pool->ref));
380 	percpu_ref_exit(&pool->ref);
381 
382 	/* pool is now off zswap_pools list and has no references. */
383 	zswap_pool_destroy(pool);
384 }
385 
386 static struct zswap_pool *zswap_pool_current(void);
387 
388 static void __zswap_pool_empty(struct percpu_ref *ref)
389 {
390 	struct zswap_pool *pool;
391 
392 	pool = container_of(ref, typeof(*pool), ref);
393 
394 	spin_lock_bh(&zswap_pools_lock);
395 
396 	WARN_ON(pool == zswap_pool_current());
397 
398 	list_del_rcu(&pool->list);
399 
400 	INIT_WORK(&pool->release_work, __zswap_pool_release);
401 	schedule_work(&pool->release_work);
402 
403 	spin_unlock_bh(&zswap_pools_lock);
404 }
405 
406 static int __must_check zswap_pool_get(struct zswap_pool *pool)
407 {
408 	if (!pool)
409 		return 0;
410 
411 	return percpu_ref_tryget(&pool->ref);
412 }
413 
414 static void zswap_pool_put(struct zswap_pool *pool)
415 {
416 	percpu_ref_put(&pool->ref);
417 }
418 
419 static struct zswap_pool *__zswap_pool_current(void)
420 {
421 	struct zswap_pool *pool;
422 
423 	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
424 	WARN_ONCE(!pool && zswap_has_pool,
425 		  "%s: no page storage pool!\n", __func__);
426 
427 	return pool;
428 }
429 
430 static struct zswap_pool *zswap_pool_current(void)
431 {
432 	assert_spin_locked(&zswap_pools_lock);
433 
434 	return __zswap_pool_current();
435 }
436 
437 static struct zswap_pool *zswap_pool_current_get(void)
438 {
439 	struct zswap_pool *pool;
440 
441 	rcu_read_lock();
442 
443 	pool = __zswap_pool_current();
444 	if (!zswap_pool_get(pool))
445 		pool = NULL;
446 
447 	rcu_read_unlock();
448 
449 	return pool;
450 }
451 
452 /* type and compressor must be null-terminated */
453 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
454 {
455 	struct zswap_pool *pool;
456 
457 	assert_spin_locked(&zswap_pools_lock);
458 
459 	list_for_each_entry_rcu(pool, &zswap_pools, list) {
460 		if (strcmp(pool->tfm_name, compressor))
461 			continue;
462 		if (strcmp(zpool_get_type(pool->zpool), type))
463 			continue;
464 		/* if we can't get it, it's about to be destroyed */
465 		if (!zswap_pool_get(pool))
466 			continue;
467 		return pool;
468 	}
469 
470 	return NULL;
471 }
472 
473 static unsigned long zswap_max_pages(void)
474 {
475 	return totalram_pages() * zswap_max_pool_percent / 100;
476 }
477 
478 static unsigned long zswap_accept_thr_pages(void)
479 {
480 	return zswap_max_pages() * zswap_accept_thr_percent / 100;
481 }
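
/*
 * A worked example with the default tunables (illustrative numbers only):
 * on a machine with 16 GiB of RAM (4194304 4-KiB pages),
 * max_pool_percent=20 caps the pool at 838860 pages (~3.2 GiB), and
 * accept_threshold_percent=90 means that once the cap is hit, new stores
 * are rejected until usage drops back below ~754974 pages (~2.9 GiB).
 */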
482 
483 unsigned long zswap_total_pages(void)
484 {
485 	struct zswap_pool *pool;
486 	unsigned long total = 0;
487 
488 	rcu_read_lock();
489 	list_for_each_entry_rcu(pool, &zswap_pools, list)
490 		total += zpool_get_total_pages(pool->zpool);
491 	rcu_read_unlock();
492 
493 	return total;
494 }
495 
496 static bool zswap_check_limits(void)
497 {
498 	unsigned long cur_pages = zswap_total_pages();
499 	unsigned long max_pages = zswap_max_pages();
500 
501 	if (cur_pages >= max_pages) {
502 		zswap_pool_limit_hit++;
503 		zswap_pool_reached_full = true;
504 	} else if (zswap_pool_reached_full &&
505 		   cur_pages <= zswap_accept_thr_pages()) {
506 			zswap_pool_reached_full = false;
507 	}
508 	return zswap_pool_reached_full;
509 }
510 
511 /*********************************
512 * param callbacks
513 **********************************/
514 
515 static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
516 {
517 	/* no change required */
518 	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
519 		return false;
520 	return true;
521 }
522 
523 /* val must be a null-terminated string */
524 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
525 			     char *type, char *compressor)
526 {
527 	struct zswap_pool *pool, *put_pool = NULL;
528 	char *s = strstrip((char *)val);
529 	int ret = 0;
530 	bool new_pool = false;
531 
532 	mutex_lock(&zswap_init_lock);
533 	switch (zswap_init_state) {
534 	case ZSWAP_UNINIT:
535 		/* if this is load-time (pre-init) param setting,
536 		 * don't create a pool; that's done during init.
537 		 */
538 		ret = param_set_charp(s, kp);
539 		break;
540 	case ZSWAP_INIT_SUCCEED:
541 		new_pool = zswap_pool_changed(s, kp);
542 		break;
543 	case ZSWAP_INIT_FAILED:
544 		pr_err("can't set param, initialization failed\n");
545 		ret = -ENODEV;
546 	}
547 	mutex_unlock(&zswap_init_lock);
548 
549 	/* no need to create a new pool, return directly */
550 	if (!new_pool)
551 		return ret;
552 
553 	if (!type) {
554 		if (!zpool_has_pool(s)) {
555 			pr_err("zpool %s not available\n", s);
556 			return -ENOENT;
557 		}
558 		type = s;
559 	} else if (!compressor) {
560 		if (!crypto_has_acomp(s, 0, 0)) {
561 			pr_err("compressor %s not available\n", s);
562 			return -ENOENT;
563 		}
564 		compressor = s;
565 	} else {
566 		WARN_ON(1);
567 		return -EINVAL;
568 	}
569 
570 	spin_lock_bh(&zswap_pools_lock);
571 
572 	pool = zswap_pool_find_get(type, compressor);
573 	if (pool) {
574 		zswap_pool_debug("using existing", pool);
575 		WARN_ON(pool == zswap_pool_current());
576 		list_del_rcu(&pool->list);
577 	}
578 
579 	spin_unlock_bh(&zswap_pools_lock);
580 
581 	if (!pool)
582 		pool = zswap_pool_create(type, compressor);
583 	else {
584 		/*
585 		 * Restore the initial ref dropped by percpu_ref_kill()
586 		 * when the pool was decommissioned and switch it again
587 		 * to percpu mode.
588 		 */
589 		percpu_ref_resurrect(&pool->ref);
590 
591 		/* Drop the ref from zswap_pool_find_get(). */
592 		zswap_pool_put(pool);
593 	}
594 
595 	if (pool)
596 		ret = param_set_charp(s, kp);
597 	else
598 		ret = -EINVAL;
599 
600 	spin_lock_bh(&zswap_pools_lock);
601 
602 	if (!ret) {
603 		put_pool = zswap_pool_current();
604 		list_add_rcu(&pool->list, &zswap_pools);
605 		zswap_has_pool = true;
606 	} else if (pool) {
607 		/* add the possibly pre-existing pool to the end of the pools
608 		 * list; if it's new (and empty) then it'll be removed and
609 		 * destroyed by the put after we drop the lock
610 		 */
611 		list_add_tail_rcu(&pool->list, &zswap_pools);
612 		put_pool = pool;
613 	}
614 
615 	spin_unlock_bh(&zswap_pools_lock);
616 
617 	if (!zswap_has_pool && !pool) {
618 		/* if initial pool creation failed, and this pool creation also
619 		 * failed, maybe both compressor and zpool params were bad.
620 		 * Allow changing this param, so pool creation will succeed
621 		 * when the other param is changed. We already verified this
622 		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
623 		 * checks above.
624 		 */
625 		ret = param_set_charp(s, kp);
626 	}
627 
628 	/* drop the ref from either the old current pool,
629 	 * or the new pool we failed to add
630 	 */
631 	if (put_pool)
632 		percpu_ref_kill(&put_pool->ref);
633 
634 	return ret;
635 }
636 
637 static int zswap_compressor_param_set(const char *val,
638 				      const struct kernel_param *kp)
639 {
640 	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
641 }
642 
643 static int zswap_zpool_param_set(const char *val,
644 				 const struct kernel_param *kp)
645 {
646 	return __zswap_param_set(val, kp, NULL, zswap_compressor);
647 }
648 
649 static int zswap_enabled_param_set(const char *val,
650 				   const struct kernel_param *kp)
651 {
652 	int ret = -ENODEV;
653 
654 	/* if this is load-time (pre-init) param setting, only set param. */
655 	if (system_state != SYSTEM_RUNNING)
656 		return param_set_bool(val, kp);
657 
658 	mutex_lock(&zswap_init_lock);
659 	switch (zswap_init_state) {
660 	case ZSWAP_UNINIT:
661 		if (zswap_setup())
662 			break;
663 		fallthrough;
664 	case ZSWAP_INIT_SUCCEED:
665 		if (!zswap_has_pool)
666 			pr_err("can't enable, no pool configured\n");
667 		else
668 			ret = param_set_bool(val, kp);
669 		break;
670 	case ZSWAP_INIT_FAILED:
671 		pr_err("can't enable, initialization failed\n");
672 	}
673 	mutex_unlock(&zswap_init_lock);
674 
675 	return ret;
676 }
677 
678 /*********************************
679 * lru functions
680 **********************************/
681 
682 /* should be called under RCU */
683 #ifdef CONFIG_MEMCG
684 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
685 {
686 	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
687 }
688 #else
689 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
690 {
691 	return NULL;
692 }
693 #endif
694 
695 static inline int entry_to_nid(struct zswap_entry *entry)
696 {
697 	return page_to_nid(virt_to_page(entry));
698 }
699 
700 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
701 {
702 	int nid = entry_to_nid(entry);
703 	struct mem_cgroup *memcg;
704 
705 	/*
706 	 * Note that it is safe to use rcu_read_lock() here, even in the face of
707 	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
708 	 * used in list_lru lookup, only two scenarios are possible:
709 	 *
710 	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
711 	 *    new entry will be reparented to memcg's parent's list_lru.
712 	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
713 	 *    new entry will be added directly to memcg's parent's list_lru.
714 	 *
715 	 * Similar reasoning holds for list_lru_del().
716 	 */
717 	rcu_read_lock();
718 	memcg = mem_cgroup_from_entry(entry);
719 	/* will always succeed */
720 	list_lru_add(list_lru, &entry->lru, nid, memcg);
721 	rcu_read_unlock();
722 }
723 
724 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
725 {
726 	int nid = entry_to_nid(entry);
727 	struct mem_cgroup *memcg;
728 
729 	rcu_read_lock();
730 	memcg = mem_cgroup_from_entry(entry);
731 	/* will always succeed */
732 	list_lru_del(list_lru, &entry->lru, nid, memcg);
733 	rcu_read_unlock();
734 }
735 
736 void zswap_lruvec_state_init(struct lruvec *lruvec)
737 {
738 	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
739 }
740 
741 void zswap_folio_swapin(struct folio *folio)
742 {
743 	struct lruvec *lruvec;
744 
745 	if (folio) {
746 		lruvec = folio_lruvec(folio);
747 		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
748 	}
749 }
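
/*
 * zswap_folio_swapin() is expected to be called by the swap-in path
 * (believed to be mm/page_io.c) when a folio has to be read from the
 * backing device, i.e. on a zswap miss. The nr_disk_swapins count it
 * maintains is consumed by zswap_shrinker_count() below to throttle
 * writeback when recent disk swapins suggest overshrinking.
 */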
750 
751 /*
752  * This function should be called when a memcg is being offlined.
753  *
754  * Since the global shrinker shrink_worker() may hold a reference
755  * of the memcg, we must check and release the reference in
756  * zswap_next_shrink.
757  *
758  * shrink_worker() must handle the case where this function releases
759  * the reference of memcg being shrunk.
760  */
761 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
762 {
763 	/* lock out zswap shrinker walking memcg tree */
764 	spin_lock(&zswap_shrink_lock);
765 	if (zswap_next_shrink == memcg) {
766 		do {
767 			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
768 		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
769 	}
770 	spin_unlock(&zswap_shrink_lock);
771 }
772 
773 /*********************************
774 * zswap entry functions
775 **********************************/
776 static struct kmem_cache *zswap_entry_cache;
777 
778 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
779 {
780 	struct zswap_entry *entry;
781 	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
782 	if (!entry)
783 		return NULL;
784 	return entry;
785 }
786 
787 static void zswap_entry_cache_free(struct zswap_entry *entry)
788 {
789 	kmem_cache_free(zswap_entry_cache, entry);
790 }
791 
792 /*
793  * Carries out the common pattern of freeing an entry's zpool allocation,
794  * freeing the entry itself, and decrementing the number of stored pages.
795  */
796 static void zswap_entry_free(struct zswap_entry *entry)
797 {
798 	zswap_lru_del(&zswap_list_lru, entry);
799 	zpool_free(entry->pool->zpool, entry->handle);
800 	zswap_pool_put(entry->pool);
801 	if (entry->objcg) {
802 		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
803 		obj_cgroup_put(entry->objcg);
804 	}
805 	zswap_entry_cache_free(entry);
806 	atomic_dec(&zswap_stored_pages);
807 }
808 
809 /*********************************
810 * compressed storage functions
811 **********************************/
812 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
813 {
814 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
815 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
816 	struct crypto_acomp *acomp;
817 	struct acomp_req *req;
818 	int ret;
819 
820 	mutex_init(&acomp_ctx->mutex);
821 
822 	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
823 	if (!acomp_ctx->buffer)
824 		return -ENOMEM;
825 
826 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
827 	if (IS_ERR(acomp)) {
828 		pr_err("could not alloc crypto acomp %s : %ld\n",
829 				pool->tfm_name, PTR_ERR(acomp));
830 		ret = PTR_ERR(acomp);
831 		goto acomp_fail;
832 	}
833 	acomp_ctx->acomp = acomp;
834 	acomp_ctx->is_sleepable = acomp_is_async(acomp);
835 
836 	req = acomp_request_alloc(acomp_ctx->acomp);
837 	if (!req) {
838 		pr_err("could not alloc crypto acomp_request %s\n",
839 		       pool->tfm_name);
840 		ret = -ENOMEM;
841 		goto req_fail;
842 	}
843 	acomp_ctx->req = req;
844 
845 	crypto_init_wait(&acomp_ctx->wait);
846 	/*
847 	 * If the acomp backend is an async engine, crypto_req_done() will wake
848 	 * up crypto_wait_req(); if the backend is a synchronous scomp, the
849 	 * callback is never invoked and crypto_wait_req() returns without blocking.
850 	 */
851 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
852 				   crypto_req_done, &acomp_ctx->wait);
853 
854 	return 0;
855 
856 req_fail:
857 	crypto_free_acomp(acomp_ctx->acomp);
858 acomp_fail:
859 	kfree(acomp_ctx->buffer);
860 	return ret;
861 }
862 
863 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
864 {
865 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
866 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
867 
868 	if (!IS_ERR_OR_NULL(acomp_ctx)) {
869 		if (!IS_ERR_OR_NULL(acomp_ctx->req))
870 			acomp_request_free(acomp_ctx->req);
871 		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
872 			crypto_free_acomp(acomp_ctx->acomp);
873 		kfree(acomp_ctx->buffer);
874 	}
875 
876 	return 0;
877 }
878 
879 static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
880 {
881 	struct crypto_acomp_ctx *acomp_ctx;
882 	struct scatterlist input, output;
883 	int comp_ret = 0, alloc_ret = 0;
884 	unsigned int dlen = PAGE_SIZE;
885 	unsigned long handle;
886 	struct zpool *zpool;
887 	char *buf;
888 	gfp_t gfp;
889 	u8 *dst;
890 
891 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
892 
893 	mutex_lock(&acomp_ctx->mutex);
894 
895 	dst = acomp_ctx->buffer;
896 	sg_init_table(&input, 1);
897 	sg_set_folio(&input, folio, PAGE_SIZE, 0);
898 
899 	/*
900 	 * We need PAGE_SIZE * 2 here because the data may expand instead of
901 	 * compressing in some cases, and hardware accelerators may not check the
902 	 * dst buffer size; give the dst buffer enough room to avoid a buffer overflow.
903 	 */
904 	sg_init_one(&output, dst, PAGE_SIZE * 2);
905 	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
906 
907 	/*
908 	 * It may look a little odd that we submit an asynchronous request and
909 	 * then wait for its completion synchronously; the overall operation is
910 	 * effectively synchronous.
911 	 * In theory, acomp lets users submit multiple requests on one acomp
912 	 * instance and have them complete concurrently. In this case, however,
913 	 * zswap stores and loads one page at a time, so a single thread doing
914 	 * zswap has no way to submit a second page before the first one is
915 	 * done.
916 	 * Different threads running on different CPUs use different acomp
917 	 * instances, though, so multiple threads can (de)compress in parallel.
918 	 */
919 	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
920 	dlen = acomp_ctx->req->dlen;
921 	if (comp_ret)
922 		goto unlock;
923 
924 	zpool = entry->pool->zpool;
925 	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
926 	if (zpool_malloc_support_movable(zpool))
927 		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
928 	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
929 	if (alloc_ret)
930 		goto unlock;
931 
932 	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
933 	memcpy(buf, dst, dlen);
934 	zpool_unmap_handle(zpool, handle);
935 
936 	entry->handle = handle;
937 	entry->length = dlen;
938 
939 unlock:
940 	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
941 		zswap_reject_compress_poor++;
942 	else if (comp_ret)
943 		zswap_reject_compress_fail++;
944 	else if (alloc_ret)
945 		zswap_reject_alloc_fail++;
946 
947 	mutex_unlock(&acomp_ctx->mutex);
948 	return comp_ret == 0 && alloc_ret == 0;
949 }
950 
951 static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
952 {
953 	struct zpool *zpool = entry->pool->zpool;
954 	struct scatterlist input, output;
955 	struct crypto_acomp_ctx *acomp_ctx;
956 	u8 *src;
957 
958 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
959 	mutex_lock(&acomp_ctx->mutex);
960 
961 	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
962 	/*
963 	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
964 	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
965 	 * resort to copying the buffer to a temporary one.
966 	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
967 	 * such as a kmap address of high memory or even a vmap address.
968 	 * However, sg_init_one() is only equipped to handle linearly mapped low memory.
969 	 * In such cases, we also must copy the buffer to a temporary lowmem one.
970 	 */
971 	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
972 	    !virt_addr_valid(src)) {
973 		memcpy(acomp_ctx->buffer, src, entry->length);
974 		src = acomp_ctx->buffer;
975 		zpool_unmap_handle(zpool, entry->handle);
976 	}
977 
978 	sg_init_one(&input, src, entry->length);
979 	sg_init_table(&output, 1);
980 	sg_set_folio(&output, folio, PAGE_SIZE, 0);
981 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
982 	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
983 	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
984 	mutex_unlock(&acomp_ctx->mutex);
985 
986 	if (src != acomp_ctx->buffer)
987 		zpool_unmap_handle(zpool, entry->handle);
988 }
989 
990 /*********************************
991 * writeback code
992 **********************************/
993 /*
994  * Attempts to free an entry by adding a folio to the swap cache,
995  * decompressing the entry data into the folio, and issuing a
996  * bio write to write the folio back to the swap device.
997  *
998  * This can be thought of as a "resumed writeback" of the folio
999  * to the swap device.  We are basically resuming the same swap
1000  * writeback path that was intercepted with the zswap_store()
1001  * in the first place.  After the folio has been decompressed into
1002  * the swap cache, the compressed version stored by zswap can be
1003  * freed.
1004  */
1005 static int zswap_writeback_entry(struct zswap_entry *entry,
1006 				 swp_entry_t swpentry)
1007 {
1008 	struct xarray *tree;
1009 	pgoff_t offset = swp_offset(swpentry);
1010 	struct folio *folio;
1011 	struct mempolicy *mpol;
1012 	bool folio_was_allocated;
1013 	struct writeback_control wbc = {
1014 		.sync_mode = WB_SYNC_NONE,
1015 	};
1016 
1017 	/* try to allocate swap cache folio */
1018 	mpol = get_task_policy(current);
1019 	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1020 				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1021 	if (!folio)
1022 		return -ENOMEM;
1023 
1024 	/*
1025 	 * Found an existing folio, we raced with swapin or concurrent
1026 	 * shrinker. We generally writeback cold folios from zswap, and
1027 	 * swapin means the folio just became hot, so skip this folio.
1028 	 * For the unlikely concurrent shrinker case, it will be unlinked
1029 	 * and freed when invalidated by the concurrent shrinker anyway.
1030 	 */
1031 	if (!folio_was_allocated) {
1032 		folio_put(folio);
1033 		return -EEXIST;
1034 	}
1035 
1036 	/*
1037 	 * folio is locked, and the swapcache is now secured against
1038 	 * concurrent swapping to and from the slot, and concurrent
1039 	 * swapoff so we can safely dereference the zswap tree here.
1040 	 * Verify that the swap entry hasn't been invalidated and recycled
1041 	 * behind our backs, to avoid overwriting a new swap folio with
1042 	 * old compressed data. Only when this is successful can the entry
1043 	 * be dereferenced.
1044 	 */
1045 	tree = swap_zswap_tree(swpentry);
1046 	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
1047 		delete_from_swap_cache(folio);
1048 		folio_unlock(folio);
1049 		folio_put(folio);
1050 		return -ENOMEM;
1051 	}
1052 
1053 	zswap_decompress(entry, folio);
1054 
1055 	count_vm_event(ZSWPWB);
1056 	if (entry->objcg)
1057 		count_objcg_event(entry->objcg, ZSWPWB);
1058 
1059 	zswap_entry_free(entry);
1060 
1061 	/* folio is up to date */
1062 	folio_mark_uptodate(folio);
1063 
1064 	/* move it to the tail of the inactive list after end_writeback */
1065 	folio_set_reclaim(folio);
1066 
1067 	/* start writeback */
1068 	__swap_writepage(folio, &wbc);
1069 	folio_put(folio);
1070 
1071 	return 0;
1072 }
1073 
1074 /*********************************
1075 * shrinker functions
1076 **********************************/
1077 /*
1078  * The dynamic shrinker is modulated by the following factors:
1079  *
1080  * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
1081  *    the entry a second chance) before rotating it in the LRU list. If the
1082  *    entry is considered again by the shrinker, with its referenced bit unset,
1083  *    it is written back. The writeback rate as a result is dynamically
1084  *    adjusted by the pool activities - if the pool is dominated by new entries
1085  *    (i.e lots of recent zswapouts), these entries will be protected and
1086  *    the writeback rate will slow down. On the other hand, if the pool has a
1087  *    lot of stagnant entries, these entries will be reclaimed immediately,
1088  *    effectively increasing the writeback rate.
1089  *
1090  * 2. Swapins counter: If we observe swapins, it is a sign that we are
1091  *    overshrinking and should slow down. We maintain a swapins counter, which
1092  *    is consumed and subtracted from the number of eligible objects on the LRU
1093  *    in zswap_shrinker_count().
1094  *
1095  * 3. Compression ratio. The better the workload compresses, the less gains we
1096  *    can expect from writeback. We scale down the number of objects available
1097  *    for reclaim by this ratio.
1098  */
1099 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1100 				       spinlock_t *lock, void *arg)
1101 {
1102 	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1103 	bool *encountered_page_in_swapcache = (bool *)arg;
1104 	swp_entry_t swpentry;
1105 	enum lru_status ret = LRU_REMOVED_RETRY;
1106 	int writeback_result;
1107 
1108 	/*
1109 	 * Second chance algorithm: if the entry has its referenced bit set, give it
1110 	 * a second chance. Only clear the referenced bit and rotate it in the
1111 	 * zswap's LRU list.
1112 	 */
1113 	if (entry->referenced) {
1114 		entry->referenced = false;
1115 		return LRU_ROTATE;
1116 	}
1117 
1118 	/*
1119 	 * As soon as we drop the LRU lock, the entry can be freed by
1120 	 * a concurrent invalidation. This means the following:
1121 	 *
1122 	 * 1. We extract the swp_entry_t to the stack, allowing
1123 	 *    zswap_writeback_entry() to pin the swap entry and
1124 	 *    then validate the zswap entry against that swap entry's
1125 	 *    tree using pointer value comparison. Only when that
1126 	 *    is successful can the entry be dereferenced.
1127 	 *
1128 	 * 2. Usually, objects are taken off the LRU for reclaim. In
1129 	 *    this case this isn't possible, because if reclaim fails
1130 	 *    for whatever reason, we have no means of knowing if the
1131 	 *    entry is alive to put it back on the LRU.
1132 	 *
1133 	 *    So rotate it before dropping the lock. If the entry is
1134 	 *    written back or invalidated, the free path will unlink
1135 	 *    it. For failures, rotation is the right thing as well.
1136 	 *
1137 	 *    Temporary failures, where the same entry should be tried
1138 	 *    again immediately, almost never happen for this shrinker.
1139 	 *    We don't do any trylocking; -ENOMEM comes closest,
1140 	 *    but that's extremely rare and doesn't happen spuriously
1141 	 *    either. Don't bother distinguishing this case.
1142 	 */
1143 	list_move_tail(item, &l->list);
1144 
1145 	/*
1146 	 * Once the lru lock is dropped, the entry might get freed. The
1147 	 * swpentry is copied to the stack, and entry isn't deref'd again
1148 	 * until the entry is verified to still be alive in the tree.
1149 	 */
1150 	swpentry = entry->swpentry;
1151 
1152 	/*
1153 	 * It's safe to drop the lock here because we return either
1154 	 * LRU_REMOVED_RETRY or LRU_RETRY.
1155 	 */
1156 	spin_unlock(lock);
1157 
1158 	writeback_result = zswap_writeback_entry(entry, swpentry);
1159 
1160 	if (writeback_result) {
1161 		zswap_reject_reclaim_fail++;
1162 		ret = LRU_RETRY;
1163 
1164 		/*
1165 		 * Encountering a page already in swap cache is a sign that we are shrinking
1166 		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1167 		 * shrinker context).
1168 		 */
1169 		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1170 			ret = LRU_STOP;
1171 			*encountered_page_in_swapcache = true;
1172 		}
1173 	} else {
1174 		zswap_written_back_pages++;
1175 	}
1176 
1177 	spin_lock(lock);
1178 	return ret;
1179 }
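
/*
 * A quick reminder of the list_lru walk protocol used above (see
 * include/linux/list_lru.h): LRU_ROTATE keeps the entry for another pass
 * (the second chance), LRU_REMOVED_RETRY means the entry was disposed of
 * and the LRU lock was dropped, LRU_RETRY reports a failed writeback
 * attempt, and LRU_STOP aborts the walk when a warm, already-swapcached
 * page is encountered.
 */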
1180 
1181 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1182 		struct shrink_control *sc)
1183 {
1184 	unsigned long shrink_ret;
1185 	bool encountered_page_in_swapcache = false;
1186 
1187 	if (!zswap_shrinker_enabled ||
1188 			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1189 		sc->nr_scanned = 0;
1190 		return SHRINK_STOP;
1191 	}
1192 
1193 	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1194 		&encountered_page_in_swapcache);
1195 
1196 	if (encountered_page_in_swapcache)
1197 		return SHRINK_STOP;
1198 
1199 	return shrink_ret ? shrink_ret : SHRINK_STOP;
1200 }
1201 
1202 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1203 		struct shrink_control *sc)
1204 {
1205 	struct mem_cgroup *memcg = sc->memcg;
1206 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1207 	atomic_long_t *nr_disk_swapins =
1208 		&lruvec->zswap_lruvec_state.nr_disk_swapins;
1209 	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
1210 		nr_remain;
1211 
1212 	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1213 		return 0;
1214 
1215 	/*
1216 	 * The shrinker resumes swap writeback, which will enter block
1217 	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1218 	 * rules (may_enter_fs()), which apply on a per-folio basis.
1219 	 */
1220 	if (!gfp_has_io_fs(sc->gfp_mask))
1221 		return 0;
1222 
1223 	/*
1224 	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1225 	 * have them per-node and thus per-lruvec. Careful if memcg is
1226 	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1227 	 * for the lruvec, but not for memcg_page_state().
1228 	 *
1229 	 * Without memcg, use the zswap pool-wide metrics.
1230 	 */
1231 	if (!mem_cgroup_disabled()) {
1232 		mem_cgroup_flush_stats(memcg);
1233 		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1234 		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1235 	} else {
1236 		nr_backing = zswap_total_pages();
1237 		nr_stored = atomic_read(&zswap_stored_pages);
1238 	}
1239 
1240 	if (!nr_stored)
1241 		return 0;
1242 
1243 	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1244 	if (!nr_freeable)
1245 		return 0;
1246 
1247 	/*
1248 	 * Subtract from the lru size the number of pages that were recently
1249 	 * swapped in from disk. The idea is that had we protected zswap's LRU
1250 	 * by this many pages, these disk swapins would not have happened.
1251 	 */
1252 	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
1253 	do {
1254 		if (nr_freeable >= nr_disk_swapins_cur)
1255 			nr_remain = 0;
1256 		else
1257 			nr_remain = nr_disk_swapins_cur - nr_freeable;
1258 	} while (!atomic_long_try_cmpxchg(
1259 		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));
1260 
1261 	nr_freeable -= nr_disk_swapins_cur - nr_remain;
1262 	if (!nr_freeable)
1263 		return 0;
1264 
1265 	/*
1266 	 * Scale the number of freeable pages by the memory saving factor.
1267 	 * This ensures that the better zswap compresses memory, the fewer
1268 	 * pages we will evict to swap (as it will otherwise incur IO for
1269 	 * relatively small memory saving).
1270 	 */
1271 	return mult_frac(nr_freeable, nr_backing, nr_stored);
1272 }
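
/*
 * An illustrative calculation (numbers invented): with 1200 reclaimable
 * entries on the LRU, 300 recent disk swapins and a 3:1 compression ratio
 * (nr_backing:nr_stored = 1:3), the shrinker reports roughly
 * (1200 - 300) / 3 = 300 freeable objects. Recent swapins and good
 * compression both scale the writeback effort down.
 */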
1273 
1274 static struct shrinker *zswap_alloc_shrinker(void)
1275 {
1276 	struct shrinker *shrinker;
1277 
1278 	shrinker =
1279 		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1280 	if (!shrinker)
1281 		return NULL;
1282 
1283 	shrinker->scan_objects = zswap_shrinker_scan;
1284 	shrinker->count_objects = zswap_shrinker_count;
1285 	shrinker->batch = 0;
1286 	shrinker->seeks = DEFAULT_SEEKS;
1287 	return shrinker;
1288 }
1289 
1290 static int shrink_memcg(struct mem_cgroup *memcg)
1291 {
1292 	int nid, shrunk = 0, scanned = 0;
1293 
1294 	if (!mem_cgroup_zswap_writeback_enabled(memcg))
1295 		return -ENOENT;
1296 
1297 	/*
1298 	 * Skip zombies because their LRUs are reparented and we would be
1299 	 * reclaiming from the parent instead of the dead memcg.
1300 	 */
1301 	if (memcg && !mem_cgroup_online(memcg))
1302 		return -ENOENT;
1303 
1304 	for_each_node_state(nid, N_NORMAL_MEMORY) {
1305 		unsigned long nr_to_walk = 1;
1306 
1307 		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1308 					    &shrink_memcg_cb, NULL, &nr_to_walk);
1309 		scanned += 1 - nr_to_walk;
1310 	}
1311 
1312 	if (!scanned)
1313 		return -ENOENT;
1314 
1315 	return shrunk ? 0 : -EAGAIN;
1316 }
1317 
1318 static void shrink_worker(struct work_struct *w)
1319 {
1320 	struct mem_cgroup *memcg;
1321 	int ret, failures = 0, attempts = 0;
1322 	unsigned long thr;
1323 
1324 	/* Reclaim down to the accept threshold */
1325 	thr = zswap_accept_thr_pages();
1326 
1327 	/*
1328 	 * Global reclaim will select cgroup in a round-robin fashion from all
1329 	 * online memcgs, but memcgs that have no pages in zswap and
1330 	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
1331 	 * candidates for shrinking.
1332 	 *
1333 	 * Shrinking will be aborted if we encounter the following
1334 	 * MAX_RECLAIM_RETRIES times:
1335 	 * - No writeback-candidate memcgs found in a memcg tree walk.
1336 	 * - Shrinking a writeback-candidate memcg failed.
1337 	 *
1338 	 * We save iteration cursor memcg into zswap_next_shrink,
1339 	 * which can be modified by the offline memcg cleaner
1340 	 * zswap_memcg_offline_cleanup().
1341 	 *
1342 	 * Since the offline cleaner is called only once, we cannot leave an
1343 	 * offline memcg reference in zswap_next_shrink.
1344 	 * We can rely on the cleaner only if we get online memcg under lock.
1345 	 *
1346 	 * If we get an offline memcg, we cannot determine if the cleaner has
1347 	 * already been called or will be called later. We must put back the
1348 	 * reference before returning from this function. Otherwise, the
1349 	 * offline memcg left in zswap_next_shrink will hold the reference
1350 	 * until the next run of shrink_worker().
1351 	 */
1352 	do {
1353 		/*
1354 		 * Start shrinking from the next memcg after zswap_next_shrink.
1355 		 * When the offline cleaner has already advanced the cursor,
1356 		 * advancing the cursor here overlooks one memcg, but this
1357 		 * should be negligibly rare.
1358 		 *
1359 		 * If we get an online memcg, keep the extra reference in case
1360 		 * the original one obtained by mem_cgroup_iter() is dropped by
1361 		 * zswap_memcg_offline_cleanup() while we are shrinking the
1362 		 * memcg.
1363 		 */
1364 		spin_lock(&zswap_shrink_lock);
1365 		do {
1366 			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1367 			zswap_next_shrink = memcg;
1368 		} while (memcg && !mem_cgroup_tryget_online(memcg));
1369 		spin_unlock(&zswap_shrink_lock);
1370 
1371 		if (!memcg) {
1372 			/*
1373 			 * Continue shrinking without incrementing failures if
1374 			 * we found candidate memcgs in the last tree walk.
1375 			 */
1376 			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
1377 				break;
1378 
1379 			attempts = 0;
1380 			goto resched;
1381 		}
1382 
1383 		ret = shrink_memcg(memcg);
1384 		/* drop the extra reference */
1385 		mem_cgroup_put(memcg);
1386 
1387 		/*
1388 		 * There are no writeback-candidate pages in the memcg.
1389 		 * This is not an issue as long as we can find another memcg
1390 		 * with pages in zswap. Skip this without incrementing attempts
1391 		 * and failures.
1392 		 */
1393 		if (ret == -ENOENT)
1394 			continue;
1395 		++attempts;
1396 
1397 		if (ret && ++failures == MAX_RECLAIM_RETRIES)
1398 			break;
1399 resched:
1400 		cond_resched();
1401 	} while (zswap_total_pages() > thr);
1402 }
1403 
1404 /*********************************
1405 * main API
1406 **********************************/
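/*
 * These entry points hook into the generic swap code (believed to be
 * mm/page_io.c and mm/swapfile.c): zswap_store() is tried on swap-out
 * before real I/O is issued, zswap_load() on swap-in, zswap_invalidate()
 * when a swap slot is freed, and zswap_swapon()/zswap_swapoff() when a
 * swap device is activated or deactivated.
 */
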
1407 bool zswap_store(struct folio *folio)
1408 {
1409 	swp_entry_t swp = folio->swap;
1410 	pgoff_t offset = swp_offset(swp);
1411 	struct xarray *tree = swap_zswap_tree(swp);
1412 	struct zswap_entry *entry, *old;
1413 	struct obj_cgroup *objcg = NULL;
1414 	struct mem_cgroup *memcg = NULL;
1415 
1416 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1417 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1418 
1419 	/* Large folios aren't supported */
1420 	if (folio_test_large(folio))
1421 		return false;
1422 
1423 	if (!zswap_enabled)
1424 		goto check_old;
1425 
1426 	/* Check cgroup limits */
1427 	objcg = get_obj_cgroup_from_folio(folio);
1428 	if (objcg && !obj_cgroup_may_zswap(objcg)) {
1429 		memcg = get_mem_cgroup_from_objcg(objcg);
1430 		if (shrink_memcg(memcg)) {
1431 			mem_cgroup_put(memcg);
1432 			goto reject;
1433 		}
1434 		mem_cgroup_put(memcg);
1435 	}
1436 
1437 	if (zswap_check_limits())
1438 		goto reject;
1439 
1440 	/* allocate entry */
1441 	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
1442 	if (!entry) {
1443 		zswap_reject_kmemcache_fail++;
1444 		goto reject;
1445 	}
1446 
1447 	/* if entry is successfully added, it keeps the reference */
1448 	entry->pool = zswap_pool_current_get();
1449 	if (!entry->pool)
1450 		goto freepage;
1451 
1452 	if (objcg) {
1453 		memcg = get_mem_cgroup_from_objcg(objcg);
1454 		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1455 			mem_cgroup_put(memcg);
1456 			goto put_pool;
1457 		}
1458 		mem_cgroup_put(memcg);
1459 	}
1460 
1461 	if (!zswap_compress(folio, entry))
1462 		goto put_pool;
1463 
1464 	entry->swpentry = swp;
1465 	entry->objcg = objcg;
1466 	entry->referenced = true;
1467 
1468 	old = xa_store(tree, offset, entry, GFP_KERNEL);
1469 	if (xa_is_err(old)) {
1470 		int err = xa_err(old);
1471 
1472 		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
1473 		zswap_reject_alloc_fail++;
1474 		goto store_failed;
1475 	}
1476 
1477 	/*
1478 	 * We may have had an existing entry that became stale when
1479 	 * the folio was redirtied and now the new version is being
1480 	 * swapped out. Get rid of the old.
1481 	 */
1482 	if (old)
1483 		zswap_entry_free(old);
1484 
1485 	if (objcg) {
1486 		obj_cgroup_charge_zswap(objcg, entry->length);
1487 		count_objcg_event(objcg, ZSWPOUT);
1488 	}
1489 
1490 	/*
1491 	 * We finish initializing the entry while it's already in xarray.
1492 	 * This is safe because:
1493 	 *
1494 	 * 1. Concurrent stores and invalidations are excluded by folio lock.
1495 	 *
1496 	 * 2. Writeback is excluded by the entry not being on the LRU yet.
1497 	 *    The publishing order matters to prevent writeback from seeing
1498 	 *    an incoherent entry.
1499 	 */
1500 	if (entry->length) {
1501 		INIT_LIST_HEAD(&entry->lru);
1502 		zswap_lru_add(&zswap_list_lru, entry);
1503 	}
1504 
1505 	/* update stats */
1506 	atomic_inc(&zswap_stored_pages);
1507 	count_vm_event(ZSWPOUT);
1508 
1509 	return true;
1510 
1511 store_failed:
1512 	zpool_free(entry->pool->zpool, entry->handle);
1513 put_pool:
1514 	zswap_pool_put(entry->pool);
1515 freepage:
1516 	zswap_entry_cache_free(entry);
1517 reject:
1518 	obj_cgroup_put(objcg);
1519 	if (zswap_pool_reached_full)
1520 		queue_work(shrink_wq, &zswap_shrink_work);
1521 check_old:
1522 	/*
1523 	 * If the zswap store fails or zswap is disabled, we must invalidate the
1524 	 * possibly stale entry which was previously stored at this offset.
1525 	 * Otherwise, writeback could overwrite the new data in the swapfile.
1526 	 */
1527 	entry = xa_erase(tree, offset);
1528 	if (entry)
1529 		zswap_entry_free(entry);
1530 	return false;
1531 }
1532 
1533 bool zswap_load(struct folio *folio)
1534 {
1535 	swp_entry_t swp = folio->swap;
1536 	pgoff_t offset = swp_offset(swp);
1537 	bool swapcache = folio_test_swapcache(folio);
1538 	struct xarray *tree = swap_zswap_tree(swp);
1539 	struct zswap_entry *entry;
1540 
1541 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1542 
1543 	if (zswap_never_enabled())
1544 		return false;
1545 
1546 	/*
1547 	 * Large folios should not be swapped in while zswap is being used, as
1548 	 * they are not properly handled. Zswap does not properly load large
1549 	 * folios, and a large folio may only be partially in zswap.
1550 	 *
1551 	 * Return true without marking the folio uptodate so that an IO error is
1552 	 * emitted (e.g. do_swap_page() will sigbus).
1553 	 */
1554 	if (WARN_ON_ONCE(folio_test_large(folio)))
1555 		return true;
1556 
1557 	/*
1558 	 * When reading into the swapcache, invalidate our entry. The
1559 	 * swapcache can be the authoritative owner of the page and
1560 	 * its mappings, and the pressure that results from having two
1561 	 * in-memory copies outweighs any benefits of caching the
1562 	 * compression work.
1563 	 *
1564 	 * (Most swapins go through the swapcache. The notable
1565 	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
1566 	 * files, which reads into a private page and may free it if
1567 	 * the fault fails. We remain the primary owner of the entry.)
1568 	 */
1569 	if (swapcache)
1570 		entry = xa_erase(tree, offset);
1571 	else
1572 		entry = xa_load(tree, offset);
1573 
1574 	if (!entry)
1575 		return false;
1576 
1577 	zswap_decompress(entry, folio);
1578 
1579 	count_vm_event(ZSWPIN);
1580 	if (entry->objcg)
1581 		count_objcg_event(entry->objcg, ZSWPIN);
1582 
1583 	if (swapcache) {
1584 		zswap_entry_free(entry);
1585 		folio_mark_dirty(folio);
1586 	}
1587 
1588 	folio_mark_uptodate(folio);
1589 	return true;
1590 }
1591 
1592 void zswap_invalidate(swp_entry_t swp)
1593 {
1594 	pgoff_t offset = swp_offset(swp);
1595 	struct xarray *tree = swap_zswap_tree(swp);
1596 	struct zswap_entry *entry;
1597 
1598 	entry = xa_erase(tree, offset);
1599 	if (entry)
1600 		zswap_entry_free(entry);
1601 }
1602 
1603 int zswap_swapon(int type, unsigned long nr_pages)
1604 {
1605 	struct xarray *trees, *tree;
1606 	unsigned int nr, i;
1607 
1608 	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1609 	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1610 	if (!trees) {
1611 		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1612 		return -ENOMEM;
1613 	}
1614 
1615 	for (i = 0; i < nr; i++)
1616 		xa_init(trees + i);
1617 
1618 	nr_zswap_trees[type] = nr;
1619 	zswap_trees[type] = trees;
1620 	return 0;
1621 }
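
/*
 * One xarray is allocated per SWAP_ADDRESS_SPACE_PAGES chunk of the swap
 * device, so larger swapfiles simply get more trees; splitting the index
 * this way is meant to spread xarray lock contention rather than funnel
 * every swap slot through a single lock.
 */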
1622 
1623 void zswap_swapoff(int type)
1624 {
1625 	struct xarray *trees = zswap_trees[type];
1626 	unsigned int i;
1627 
1628 	if (!trees)
1629 		return;
1630 
1631 	/* try_to_unuse() invalidated all the entries already */
1632 	for (i = 0; i < nr_zswap_trees[type]; i++)
1633 		WARN_ON_ONCE(!xa_empty(trees + i));
1634 
1635 	kvfree(trees);
1636 	nr_zswap_trees[type] = 0;
1637 	zswap_trees[type] = NULL;
1638 }
1639 
1640 /*********************************
1641 * debugfs functions
1642 **********************************/
1643 #ifdef CONFIG_DEBUG_FS
1644 #include <linux/debugfs.h>
1645 
1646 static struct dentry *zswap_debugfs_root;
1647 
1648 static int debugfs_get_total_size(void *data, u64 *val)
1649 {
1650 	*val = zswap_total_pages() * PAGE_SIZE;
1651 	return 0;
1652 }
1653 DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
1654 
1655 static int zswap_debugfs_init(void)
1656 {
1657 	if (!debugfs_initialized())
1658 		return -ENODEV;
1659 
1660 	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1661 
1662 	debugfs_create_u64("pool_limit_hit", 0444,
1663 			   zswap_debugfs_root, &zswap_pool_limit_hit);
1664 	debugfs_create_u64("reject_reclaim_fail", 0444,
1665 			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
1666 	debugfs_create_u64("reject_alloc_fail", 0444,
1667 			   zswap_debugfs_root, &zswap_reject_alloc_fail);
1668 	debugfs_create_u64("reject_kmemcache_fail", 0444,
1669 			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1670 	debugfs_create_u64("reject_compress_fail", 0444,
1671 			   zswap_debugfs_root, &zswap_reject_compress_fail);
1672 	debugfs_create_u64("reject_compress_poor", 0444,
1673 			   zswap_debugfs_root, &zswap_reject_compress_poor);
1674 	debugfs_create_u64("written_back_pages", 0444,
1675 			   zswap_debugfs_root, &zswap_written_back_pages);
1676 	debugfs_create_file("pool_total_size", 0444,
1677 			    zswap_debugfs_root, NULL, &total_size_fops);
1678 	debugfs_create_atomic_t("stored_pages", 0444,
1679 				zswap_debugfs_root, &zswap_stored_pages);
1680 
1681 	return 0;
1682 }
1683 #else
1684 static int zswap_debugfs_init(void)
1685 {
1686 	return 0;
1687 }
1688 #endif
1689 
1690 /*********************************
1691 * module init and exit
1692 **********************************/
1693 static int zswap_setup(void)
1694 {
1695 	struct zswap_pool *pool;
1696 	int ret;
1697 
1698 	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1699 	if (!zswap_entry_cache) {
1700 		pr_err("entry cache creation failed\n");
1701 		goto cache_fail;
1702 	}
1703 
1704 	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1705 				      "mm/zswap_pool:prepare",
1706 				      zswap_cpu_comp_prepare,
1707 				      zswap_cpu_comp_dead);
1708 	if (ret)
1709 		goto hp_fail;
1710 
1711 	shrink_wq = alloc_workqueue("zswap-shrink",
1712 			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1713 	if (!shrink_wq)
1714 		goto shrink_wq_fail;
1715 
1716 	zswap_shrinker = zswap_alloc_shrinker();
1717 	if (!zswap_shrinker)
1718 		goto shrinker_fail;
1719 	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1720 		goto lru_fail;
1721 	shrinker_register(zswap_shrinker);
1722 
1723 	INIT_WORK(&zswap_shrink_work, shrink_worker);
1724 
1725 	pool = __zswap_pool_create_fallback();
1726 	if (pool) {
1727 		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1728 			zpool_get_type(pool->zpool));
1729 		list_add(&pool->list, &zswap_pools);
1730 		zswap_has_pool = true;
1731 		static_branch_enable(&zswap_ever_enabled);
1732 	} else {
1733 		pr_err("pool creation failed\n");
1734 		zswap_enabled = false;
1735 	}
1736 
1737 	if (zswap_debugfs_init())
1738 		pr_warn("debugfs initialization failed\n");
1739 	zswap_init_state = ZSWAP_INIT_SUCCEED;
1740 	return 0;
1741 
1742 lru_fail:
1743 	shrinker_free(zswap_shrinker);
1744 shrinker_fail:
1745 	destroy_workqueue(shrink_wq);
1746 shrink_wq_fail:
1747 	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1748 hp_fail:
1749 	kmem_cache_destroy(zswap_entry_cache);
1750 cache_fail:
1751 	/* if built-in, we aren't unloaded on failure; don't allow use */
1752 	zswap_init_state = ZSWAP_INIT_FAILED;
1753 	zswap_enabled = false;
1754 	return -ENOMEM;
1755 }
1756 
1757 static int __init zswap_init(void)
1758 {
1759 	if (!zswap_enabled)
1760 		return 0;
1761 	return zswap_setup();
1762 }
1763 /* must be late so crypto has time to come up */
1764 late_initcall(zswap_init);
1765 
1766 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1767 MODULE_DESCRIPTION("Compressed cache for swap pages");
1768