xref: /linux/mm/zswap.c (revision a634dda26186cf9a51567020fcce52bcba5e1e59)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * zswap.c - zswap driver file
4  *
5  * zswap is a cache that takes pages that are in the process
6  * of being swapped out and attempts to compress and store them in a
7  * RAM-based memory pool.  This can result in a significant I/O reduction on
8  * the swap device and, in the case where decompressing from RAM is faster
9  * than reading from the swap device, can also improve workload performance.
10  *
11  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
12 */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/highmem.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/swap.h>
24 #include <linux/crypto.h>
25 #include <linux/scatterlist.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mempool.h>
28 #include <linux/zpool.h>
29 #include <crypto/acompress.h>
30 #include <linux/zswap.h>
31 #include <linux/mm_types.h>
32 #include <linux/page-flags.h>
33 #include <linux/swapops.h>
34 #include <linux/writeback.h>
35 #include <linux/pagemap.h>
36 #include <linux/workqueue.h>
37 #include <linux/list_lru.h>
38 
39 #include "swap.h"
40 #include "internal.h"
41 
42 /*********************************
43 * statistics
44 **********************************/
45 /* The number of compressed pages currently stored in zswap */
46 atomic_long_t zswap_stored_pages = ATOMIC_INIT(0);
47 
48 /*
49  * The statistics below are not protected from concurrent access for
50  * performance reasons, so they may not be 100% accurate.  However,
51  * they do provide useful information on roughly how many times a
52  * certain event is occurring.
53 */
54 
55 /* Pool limit was hit (see zswap_max_pool_percent) */
56 static u64 zswap_pool_limit_hit;
57 /* Pages written back when pool limit was reached */
58 static u64 zswap_written_back_pages;
59 /* Store failed due to a reclaim failure after pool limit was reached */
60 static u64 zswap_reject_reclaim_fail;
61 /* Store failed due to compression algorithm failure */
62 static u64 zswap_reject_compress_fail;
63 /* Compressed page was too big for the allocator to (optimally) store */
64 static u64 zswap_reject_compress_poor;
65 /* Store failed because underlying allocator could not get memory */
66 static u64 zswap_reject_alloc_fail;
67 /* Store failed because the entry metadata could not be allocated (rare) */
68 static u64 zswap_reject_kmemcache_fail;
69 
70 /* Shrinker work queue */
71 static struct workqueue_struct *shrink_wq;
72 /* Pool limit was hit, we need to calm down */
73 static bool zswap_pool_reached_full;
74 
75 /*********************************
76 * tunables
77 **********************************/
78 
79 #define ZSWAP_PARAM_UNSET ""
80 
81 static int zswap_setup(void);
82 
83 /* Enable/disable zswap */
84 static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
85 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
86 static int zswap_enabled_param_set(const char *,
87 				   const struct kernel_param *);
88 static const struct kernel_param_ops zswap_enabled_param_ops = {
89 	.set =		zswap_enabled_param_set,
90 	.get =		param_get_bool,
91 };
92 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
93 
94 /* Crypto compressor to use */
95 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
96 static int zswap_compressor_param_set(const char *,
97 				      const struct kernel_param *);
98 static const struct kernel_param_ops zswap_compressor_param_ops = {
99 	.set =		zswap_compressor_param_set,
100 	.get =		param_get_charp,
101 	.free =		param_free_charp,
102 };
103 module_param_cb(compressor, &zswap_compressor_param_ops,
104 		&zswap_compressor, 0644);
105 
106 /* Compressed storage zpool to use */
107 static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
108 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
109 static const struct kernel_param_ops zswap_zpool_param_ops = {
110 	.set =		zswap_zpool_param_set,
111 	.get =		param_get_charp,
112 	.free =		param_free_charp,
113 };
114 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
115 
116 /* The maximum percentage of memory that the compressed pool can occupy */
117 static unsigned int zswap_max_pool_percent = 20;
118 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
119 
120 /* The threshold for accepting new pages after the max_pool_percent was hit */
121 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
122 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
123 		   uint, 0644);
124 
125 /* Enable/disable memory pressure-based shrinker. */
126 static bool zswap_shrinker_enabled = IS_ENABLED(
127 		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
128 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
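
/*
 * Illustrative usage note (added commentary, not part of the original
 * source): each module parameter above is exposed with mode 0644 under
 * /sys/module/zswap/parameters/, so the knobs can be tuned at runtime,
 * for example:
 *
 *   echo zstd > /sys/module/zswap/parameters/compressor
 *   echo 25 > /sys/module/zswap/parameters/max_pool_percent
 *
 * (assuming the chosen compressor is available to the crypto API).
 */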
129 
130 bool zswap_is_enabled(void)
131 {
132 	return zswap_enabled;
133 }
134 
135 bool zswap_never_enabled(void)
136 {
137 	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
138 }
139 
140 /*********************************
141 * data structures
142 **********************************/
143 
144 struct crypto_acomp_ctx {
145 	struct crypto_acomp *acomp;
146 	struct acomp_req *req;
147 	struct crypto_wait wait;
148 	u8 *buffer;
149 	struct mutex mutex;
150 	bool is_sleepable;
151 };
152 
153 /*
154  * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
155  * The only case where lru_lock is not acquired while holding tree.lock is
156  * when a zswap_entry is taken off the lru for writeback; in that case, it
157  * needs to be verified that it's still valid in the tree.
158  */
159 struct zswap_pool {
160 	struct zpool *zpool;
161 	struct crypto_acomp_ctx __percpu *acomp_ctx;
162 	struct percpu_ref ref;
163 	struct list_head list;
164 	struct work_struct release_work;
165 	struct hlist_node node;
166 	char tfm_name[CRYPTO_MAX_ALG_NAME];
167 };
168 
169 /* Global LRU lists shared by all zswap pools. */
170 static struct list_lru zswap_list_lru;
171 
172 /* The lock protects zswap_next_shrink updates. */
173 static DEFINE_SPINLOCK(zswap_shrink_lock);
174 static struct mem_cgroup *zswap_next_shrink;
175 static struct work_struct zswap_shrink_work;
176 static struct shrinker *zswap_shrinker;
177 
178 /*
179  * struct zswap_entry
180  *
181  * This structure contains the metadata for tracking a single compressed
182  * page within zswap.
183  *
184  * swpentry - associated swap entry, the offset indexes into the xarray
185  * length - the length in bytes of the compressed page data.  Needed during
186  *          decompression.
187  * referenced - true if the entry recently entered the zswap pool. Unset by the
188  *              writeback logic. The entry is only reclaimed by the writeback
189  *              logic if referenced is unset. See comments in the shrinker
190  *              section for context.
191  * pool - the zswap_pool the entry's data is in
192  * handle - zpool allocation handle that stores the compressed page data
193  * objcg - the obj_cgroup that the compressed memory is charged to
194  * lru - handle to the pool's lru used to evict pages.
195  */
196 struct zswap_entry {
197 	swp_entry_t swpentry;
198 	unsigned int length;
199 	bool referenced;
200 	struct zswap_pool *pool;
201 	unsigned long handle;
202 	struct obj_cgroup *objcg;
203 	struct list_head lru;
204 };
205 
206 static struct xarray *zswap_trees[MAX_SWAPFILES];
207 static unsigned int nr_zswap_trees[MAX_SWAPFILES];
208 
209 /* RCU-protected iteration */
210 static LIST_HEAD(zswap_pools);
211 /* protects zswap_pools list modification */
212 static DEFINE_SPINLOCK(zswap_pools_lock);
213 /* pool counter to provide unique names to zpool */
214 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
215 
216 enum zswap_init_type {
217 	ZSWAP_UNINIT,
218 	ZSWAP_INIT_SUCCEED,
219 	ZSWAP_INIT_FAILED
220 };
221 
222 static enum zswap_init_type zswap_init_state;
223 
224 /* used to ensure the integrity of initialization */
225 static DEFINE_MUTEX(zswap_init_lock);
226 
227 /* init completed, but couldn't create the initial pool */
228 static bool zswap_has_pool;
229 
230 /*********************************
231 * helpers and fwd declarations
232 **********************************/
233 
234 static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
235 {
236 	return &zswap_trees[swp_type(swp)][swp_offset(swp)
237 		>> SWAP_ADDRESS_SPACE_SHIFT];
238 }
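
/*
 * Illustrative example (added commentary, not part of the original source):
 * assuming the usual SWAP_ADDRESS_SPACE_SHIFT of 14 (16384 swap slots per
 * tree), a swap entry of type 0 with offset 40000 maps to
 * zswap_trees[0][40000 >> 14], i.e. the third xarray (index 2) for that
 * swap device.
 */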
239 
240 #define zswap_pool_debug(msg, p)				\
241 	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
242 		 zpool_get_type((p)->zpool))
243 
244 /*********************************
245 * pool functions
246 **********************************/
247 static void __zswap_pool_empty(struct percpu_ref *ref);
248 
249 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
250 {
251 	struct zswap_pool *pool;
252 	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
253 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
254 	int ret, cpu;
255 
256 	if (!zswap_has_pool) {
257 		/* if either is unset, pool initialization failed, and we
258 		 * need both params to be set correctly before trying to
259 		 * create a pool.
260 		 */
261 		if (!strcmp(type, ZSWAP_PARAM_UNSET))
262 			return NULL;
263 		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
264 			return NULL;
265 	}
266 
267 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
268 	if (!pool)
269 		return NULL;
270 
271 	/* unique name for each pool specifically required by zsmalloc */
272 	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
273 	pool->zpool = zpool_create_pool(type, name, gfp);
274 	if (!pool->zpool) {
275 		pr_err("%s zpool not available\n", type);
276 		goto error;
277 	}
278 	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
279 
280 	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
281 
282 	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
283 	if (!pool->acomp_ctx) {
284 		pr_err("percpu alloc failed\n");
285 		goto error;
286 	}
287 
288 	for_each_possible_cpu(cpu)
289 		mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);
290 
291 	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
292 				       &pool->node);
293 	if (ret)
294 		goto error;
295 
296 	/* being the current pool takes 1 ref; this func expects the
297 	 * caller to always add the new pool as the current pool
298 	 */
299 	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
300 			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
301 	if (ret)
302 		goto ref_fail;
303 	INIT_LIST_HEAD(&pool->list);
304 
305 	zswap_pool_debug("created", pool);
306 
307 	return pool;
308 
309 ref_fail:
310 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
311 error:
312 	if (pool->acomp_ctx)
313 		free_percpu(pool->acomp_ctx);
314 	if (pool->zpool)
315 		zpool_destroy_pool(pool->zpool);
316 	kfree(pool);
317 	return NULL;
318 }
319 
320 static struct zswap_pool *__zswap_pool_create_fallback(void)
321 {
322 	bool has_comp, has_zpool;
323 
324 	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
325 	if (!has_comp && strcmp(zswap_compressor,
326 				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
327 		pr_err("compressor %s not available, using default %s\n",
328 		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
329 		param_free_charp(&zswap_compressor);
330 		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
331 		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
332 	}
333 	if (!has_comp) {
334 		pr_err("default compressor %s not available\n",
335 		       zswap_compressor);
336 		param_free_charp(&zswap_compressor);
337 		zswap_compressor = ZSWAP_PARAM_UNSET;
338 	}
339 
340 	has_zpool = zpool_has_pool(zswap_zpool_type);
341 	if (!has_zpool && strcmp(zswap_zpool_type,
342 				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
343 		pr_err("zpool %s not available, using default %s\n",
344 		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
345 		param_free_charp(&zswap_zpool_type);
346 		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
347 		has_zpool = zpool_has_pool(zswap_zpool_type);
348 	}
349 	if (!has_zpool) {
350 		pr_err("default zpool %s not available\n",
351 		       zswap_zpool_type);
352 		param_free_charp(&zswap_zpool_type);
353 		zswap_zpool_type = ZSWAP_PARAM_UNSET;
354 	}
355 
356 	if (!has_comp || !has_zpool)
357 		return NULL;
358 
359 	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
360 }
361 
362 static void zswap_pool_destroy(struct zswap_pool *pool)
363 {
364 	zswap_pool_debug("destroying", pool);
365 
366 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
367 	free_percpu(pool->acomp_ctx);
368 
369 	zpool_destroy_pool(pool->zpool);
370 	kfree(pool);
371 }
372 
373 static void __zswap_pool_release(struct work_struct *work)
374 {
375 	struct zswap_pool *pool = container_of(work, typeof(*pool),
376 						release_work);
377 
378 	synchronize_rcu();
379 
380 	/* nobody should have been able to get a ref... */
381 	WARN_ON(!percpu_ref_is_zero(&pool->ref));
382 	percpu_ref_exit(&pool->ref);
383 
384 	/* pool is now off zswap_pools list and has no references. */
385 	zswap_pool_destroy(pool);
386 }
387 
388 static struct zswap_pool *zswap_pool_current(void);
389 
390 static void __zswap_pool_empty(struct percpu_ref *ref)
391 {
392 	struct zswap_pool *pool;
393 
394 	pool = container_of(ref, typeof(*pool), ref);
395 
396 	spin_lock_bh(&zswap_pools_lock);
397 
398 	WARN_ON(pool == zswap_pool_current());
399 
400 	list_del_rcu(&pool->list);
401 
402 	INIT_WORK(&pool->release_work, __zswap_pool_release);
403 	schedule_work(&pool->release_work);
404 
405 	spin_unlock_bh(&zswap_pools_lock);
406 }
407 
408 static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
409 {
410 	if (!pool)
411 		return 0;
412 
413 	return percpu_ref_tryget(&pool->ref);
414 }
415 
416 /* The caller must already have a reference. */
417 static void zswap_pool_get(struct zswap_pool *pool)
418 {
419 	percpu_ref_get(&pool->ref);
420 }
421 
422 static void zswap_pool_put(struct zswap_pool *pool)
423 {
424 	percpu_ref_put(&pool->ref);
425 }
426 
427 static struct zswap_pool *__zswap_pool_current(void)
428 {
429 	struct zswap_pool *pool;
430 
431 	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
432 	WARN_ONCE(!pool && zswap_has_pool,
433 		  "%s: no page storage pool!\n", __func__);
434 
435 	return pool;
436 }
437 
438 static struct zswap_pool *zswap_pool_current(void)
439 {
440 	assert_spin_locked(&zswap_pools_lock);
441 
442 	return __zswap_pool_current();
443 }
444 
445 static struct zswap_pool *zswap_pool_current_get(void)
446 {
447 	struct zswap_pool *pool;
448 
449 	rcu_read_lock();
450 
451 	pool = __zswap_pool_current();
452 	if (!zswap_pool_tryget(pool))
453 		pool = NULL;
454 
455 	rcu_read_unlock();
456 
457 	return pool;
458 }
459 
460 /* type and compressor must be null-terminated */
461 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
462 {
463 	struct zswap_pool *pool;
464 
465 	assert_spin_locked(&zswap_pools_lock);
466 
467 	list_for_each_entry_rcu(pool, &zswap_pools, list) {
468 		if (strcmp(pool->tfm_name, compressor))
469 			continue;
470 		if (strcmp(zpool_get_type(pool->zpool), type))
471 			continue;
472 		/* if we can't get it, it's about to be destroyed */
473 		if (!zswap_pool_tryget(pool))
474 			continue;
475 		return pool;
476 	}
477 
478 	return NULL;
479 }
480 
481 static unsigned long zswap_max_pages(void)
482 {
483 	return totalram_pages() * zswap_max_pool_percent / 100;
484 }
485 
486 static unsigned long zswap_accept_thr_pages(void)
487 {
488 	return zswap_max_pages() * zswap_accept_thr_percent / 100;
489 }
490 
491 unsigned long zswap_total_pages(void)
492 {
493 	struct zswap_pool *pool;
494 	unsigned long total = 0;
495 
496 	rcu_read_lock();
497 	list_for_each_entry_rcu(pool, &zswap_pools, list)
498 		total += zpool_get_total_pages(pool->zpool);
499 	rcu_read_unlock();
500 
501 	return total;
502 }
503 
504 static bool zswap_check_limits(void)
505 {
506 	unsigned long cur_pages = zswap_total_pages();
507 	unsigned long max_pages = zswap_max_pages();
508 
509 	if (cur_pages >= max_pages) {
510 		zswap_pool_limit_hit++;
511 		zswap_pool_reached_full = true;
512 	} else if (zswap_pool_reached_full &&
513 		   cur_pages <= zswap_accept_thr_pages()) {
514 			zswap_pool_reached_full = false;
515 	}
516 	return zswap_pool_reached_full;
517 }
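
/*
 * Illustrative example (added commentary, not part of the original source):
 * with the default max_pool_percent=20 and accept_threshold_percent=90 on a
 * system with 1,000,000 pages of RAM, zswap_max_pages() is 200,000. Once the
 * pool reaches 200,000 pages, new stores are rejected until the pool drains
 * below zswap_accept_thr_pages() = 180,000 pages.
 */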
518 
519 /*********************************
520 * param callbacks
521 **********************************/
522 
523 static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
524 {
525 	/* no change required */
526 	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
527 		return false;
528 	return true;
529 }
530 
531 /* val must be a null-terminated string */
532 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
533 			     char *type, char *compressor)
534 {
535 	struct zswap_pool *pool, *put_pool = NULL;
536 	char *s = strstrip((char *)val);
537 	int ret = 0;
538 	bool new_pool = false;
539 
540 	mutex_lock(&zswap_init_lock);
541 	switch (zswap_init_state) {
542 	case ZSWAP_UNINIT:
543 		/* if this is load-time (pre-init) param setting,
544 		 * don't create a pool; that's done during init.
545 		 */
546 		ret = param_set_charp(s, kp);
547 		break;
548 	case ZSWAP_INIT_SUCCEED:
549 		new_pool = zswap_pool_changed(s, kp);
550 		break;
551 	case ZSWAP_INIT_FAILED:
552 		pr_err("can't set param, initialization failed\n");
553 		ret = -ENODEV;
554 	}
555 	mutex_unlock(&zswap_init_lock);
556 
557 	/* no need to create a new pool, return directly */
558 	if (!new_pool)
559 		return ret;
560 
561 	if (!type) {
562 		if (!zpool_has_pool(s)) {
563 			pr_err("zpool %s not available\n", s);
564 			return -ENOENT;
565 		}
566 		type = s;
567 	} else if (!compressor) {
568 		if (!crypto_has_acomp(s, 0, 0)) {
569 			pr_err("compressor %s not available\n", s);
570 			return -ENOENT;
571 		}
572 		compressor = s;
573 	} else {
574 		WARN_ON(1);
575 		return -EINVAL;
576 	}
577 
578 	spin_lock_bh(&zswap_pools_lock);
579 
580 	pool = zswap_pool_find_get(type, compressor);
581 	if (pool) {
582 		zswap_pool_debug("using existing", pool);
583 		WARN_ON(pool == zswap_pool_current());
584 		list_del_rcu(&pool->list);
585 	}
586 
587 	spin_unlock_bh(&zswap_pools_lock);
588 
589 	if (!pool)
590 		pool = zswap_pool_create(type, compressor);
591 	else {
592 		/*
593 		 * Restore the initial ref dropped by percpu_ref_kill()
594 		 * when the pool was decommissioned and switch it again
595 		 * to percpu mode.
596 		 */
597 		percpu_ref_resurrect(&pool->ref);
598 
599 		/* Drop the ref from zswap_pool_find_get(). */
600 		zswap_pool_put(pool);
601 	}
602 
603 	if (pool)
604 		ret = param_set_charp(s, kp);
605 	else
606 		ret = -EINVAL;
607 
608 	spin_lock_bh(&zswap_pools_lock);
609 
610 	if (!ret) {
611 		put_pool = zswap_pool_current();
612 		list_add_rcu(&pool->list, &zswap_pools);
613 		zswap_has_pool = true;
614 	} else if (pool) {
615 		/* add the possibly pre-existing pool to the end of the pools
616 		 * list; if it's new (and empty) then it'll be removed and
617 		 * destroyed by the put after we drop the lock
618 		 */
619 		list_add_tail_rcu(&pool->list, &zswap_pools);
620 		put_pool = pool;
621 	}
622 
623 	spin_unlock_bh(&zswap_pools_lock);
624 
625 	if (!zswap_has_pool && !pool) {
626 		/* if initial pool creation failed, and this pool creation also
627 		 * failed, maybe both compressor and zpool params were bad.
628 		 * Allow changing this param, so pool creation will succeed
629 		 * when the other param is changed. We already verified this
630 		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
631 		 * checks above.
632 		 */
633 		ret = param_set_charp(s, kp);
634 	}
635 
636 	/* drop the ref from either the old current pool,
637 	 * or the new pool we failed to add
638 	 */
639 	if (put_pool)
640 		percpu_ref_kill(&put_pool->ref);
641 
642 	return ret;
643 }
644 
645 static int zswap_compressor_param_set(const char *val,
646 				      const struct kernel_param *kp)
647 {
648 	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
649 }
650 
651 static int zswap_zpool_param_set(const char *val,
652 				 const struct kernel_param *kp)
653 {
654 	return __zswap_param_set(val, kp, NULL, zswap_compressor);
655 }
656 
657 static int zswap_enabled_param_set(const char *val,
658 				   const struct kernel_param *kp)
659 {
660 	int ret = -ENODEV;
661 
662 	/* if this is load-time (pre-init) param setting, only set param. */
663 	if (system_state != SYSTEM_RUNNING)
664 		return param_set_bool(val, kp);
665 
666 	mutex_lock(&zswap_init_lock);
667 	switch (zswap_init_state) {
668 	case ZSWAP_UNINIT:
669 		if (zswap_setup())
670 			break;
671 		fallthrough;
672 	case ZSWAP_INIT_SUCCEED:
673 		if (!zswap_has_pool)
674 			pr_err("can't enable, no pool configured\n");
675 		else
676 			ret = param_set_bool(val, kp);
677 		break;
678 	case ZSWAP_INIT_FAILED:
679 		pr_err("can't enable, initialization failed\n");
680 	}
681 	mutex_unlock(&zswap_init_lock);
682 
683 	return ret;
684 }
685 
686 /*********************************
687 * lru functions
688 **********************************/
689 
690 /* should be called under RCU */
691 #ifdef CONFIG_MEMCG
692 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
693 {
694 	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
695 }
696 #else
697 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
698 {
699 	return NULL;
700 }
701 #endif
702 
703 static inline int entry_to_nid(struct zswap_entry *entry)
704 {
705 	return page_to_nid(virt_to_page(entry));
706 }
707 
708 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
709 {
710 	int nid = entry_to_nid(entry);
711 	struct mem_cgroup *memcg;
712 
713 	/*
714 	 * Note that it is safe to use rcu_read_lock() here, even in the face of
715 	 * concurrent memcg offlining:
716 	 *
717 	 * 1. list_lru_add() is called before list_lru_one is dead. The
718 	 *    new entry will be reparented to memcg's parent's list_lru.
719 	 * 2. list_lru_add() is called after list_lru_one is dead. The
720 	 *    new entry will be added directly to memcg's parent's list_lru.
721 	 *
722 	 * Similar reasoning holds for list_lru_del().
723 	 */
724 	rcu_read_lock();
725 	memcg = mem_cgroup_from_entry(entry);
726 	/* will always succeed */
727 	list_lru_add(list_lru, &entry->lru, nid, memcg);
728 	rcu_read_unlock();
729 }
730 
731 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
732 {
733 	int nid = entry_to_nid(entry);
734 	struct mem_cgroup *memcg;
735 
736 	rcu_read_lock();
737 	memcg = mem_cgroup_from_entry(entry);
738 	/* will always succeed */
739 	list_lru_del(list_lru, &entry->lru, nid, memcg);
740 	rcu_read_unlock();
741 }
742 
743 void zswap_lruvec_state_init(struct lruvec *lruvec)
744 {
745 	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
746 }
747 
748 void zswap_folio_swapin(struct folio *folio)
749 {
750 	struct lruvec *lruvec;
751 
752 	if (folio) {
753 		lruvec = folio_lruvec(folio);
754 		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
755 	}
756 }
757 
758 /*
759  * This function should be called when a memcg is being offlined.
760  *
761  * Since the global shrinker shrink_worker() may hold a reference
762  * to the memcg, we must check and release the reference in
763  * zswap_next_shrink.
764  *
765  * shrink_worker() must handle the case where this function releases
766  * the reference of memcg being shrunk.
767  */
768 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
769 {
770 	/* lock out zswap shrinker walking memcg tree */
771 	spin_lock(&zswap_shrink_lock);
772 	if (zswap_next_shrink == memcg) {
773 		do {
774 			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
775 		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
776 	}
777 	spin_unlock(&zswap_shrink_lock);
778 }
779 
780 /*********************************
781 * zswap entry functions
782 **********************************/
783 static struct kmem_cache *zswap_entry_cache;
784 
785 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
786 {
787 	struct zswap_entry *entry;
788 	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
789 	if (!entry)
790 		return NULL;
791 	return entry;
792 }
793 
794 static void zswap_entry_cache_free(struct zswap_entry *entry)
795 {
796 	kmem_cache_free(zswap_entry_cache, entry);
797 }
798 
799 /*
800  * Carries out the common pattern of freeing an entry's zpool allocation,
801  * freeing the entry itself, and decrementing the number of stored pages.
802  */
803 static void zswap_entry_free(struct zswap_entry *entry)
804 {
805 	zswap_lru_del(&zswap_list_lru, entry);
806 	zpool_free(entry->pool->zpool, entry->handle);
807 	zswap_pool_put(entry->pool);
808 	if (entry->objcg) {
809 		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
810 		obj_cgroup_put(entry->objcg);
811 	}
812 	zswap_entry_cache_free(entry);
813 	atomic_long_dec(&zswap_stored_pages);
814 }
815 
816 /*********************************
817 * compressed storage functions
818 **********************************/
819 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
820 {
821 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
822 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
823 	struct crypto_acomp *acomp;
824 	struct acomp_req *req;
825 	int ret;
826 
827 	mutex_lock(&acomp_ctx->mutex);
828 	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
829 	if (!acomp_ctx->buffer) {
830 		ret = -ENOMEM;
831 		goto buffer_fail;
832 	}
833 
834 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
835 	if (IS_ERR(acomp)) {
836 		pr_err("could not alloc crypto acomp %s : %ld\n",
837 				pool->tfm_name, PTR_ERR(acomp));
838 		ret = PTR_ERR(acomp);
839 		goto acomp_fail;
840 	}
841 	acomp_ctx->acomp = acomp;
842 	acomp_ctx->is_sleepable = acomp_is_async(acomp);
843 
844 	req = acomp_request_alloc(acomp_ctx->acomp);
845 	if (!req) {
846 		pr_err("could not alloc crypto acomp_request %s\n",
847 		       pool->tfm_name);
848 		ret = -ENOMEM;
849 		goto req_fail;
850 	}
851 	acomp_ctx->req = req;
852 
853 	crypto_init_wait(&acomp_ctx->wait);
854 	/*
855 	 * If the acomp backend is an async zip driver, crypto_req_done() will wake
856 	 * up crypto_wait_req(); if the backend is scomp, the callback won't be
857 	 * called and crypto_wait_req() will return without blocking.
858 	 */
859 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
860 				   crypto_req_done, &acomp_ctx->wait);
861 
862 	mutex_unlock(&acomp_ctx->mutex);
863 	return 0;
864 
865 req_fail:
866 	crypto_free_acomp(acomp_ctx->acomp);
867 acomp_fail:
868 	kfree(acomp_ctx->buffer);
869 buffer_fail:
870 	mutex_unlock(&acomp_ctx->mutex);
871 	return ret;
872 }
873 
874 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
875 {
876 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
877 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
878 
879 	mutex_lock(&acomp_ctx->mutex);
880 	if (!IS_ERR_OR_NULL(acomp_ctx)) {
881 		if (!IS_ERR_OR_NULL(acomp_ctx->req))
882 			acomp_request_free(acomp_ctx->req);
883 		acomp_ctx->req = NULL;
884 		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
885 			crypto_free_acomp(acomp_ctx->acomp);
886 		kfree(acomp_ctx->buffer);
887 	}
888 	mutex_unlock(&acomp_ctx->mutex);
889 
890 	return 0;
891 }
892 
893 static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
894 {
895 	struct crypto_acomp_ctx *acomp_ctx;
896 
897 	for (;;) {
898 		acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
899 		mutex_lock(&acomp_ctx->mutex);
900 		if (likely(acomp_ctx->req))
901 			return acomp_ctx;
902 		/*
903 		 * It is possible that we were migrated to a different CPU after
904 		 * getting the per-CPU ctx but before the mutex was acquired. If
905 		 * the old CPU got offlined, zswap_cpu_comp_dead() could have
906 		 * already freed ctx->req (among other things) and set it to
907 		 * NULL. Just try again on the new CPU that we ended up on.
908 		 */
909 		mutex_unlock(&acomp_ctx->mutex);
910 	}
911 }
912 
913 static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
914 {
915 	mutex_unlock(&acomp_ctx->mutex);
916 }
917 
918 static bool zswap_compress(struct page *page, struct zswap_entry *entry,
919 			   struct zswap_pool *pool)
920 {
921 	struct crypto_acomp_ctx *acomp_ctx;
922 	struct scatterlist input, output;
923 	int comp_ret = 0, alloc_ret = 0;
924 	unsigned int dlen = PAGE_SIZE;
925 	unsigned long handle;
926 	struct zpool *zpool;
927 	char *buf;
928 	gfp_t gfp;
929 	u8 *dst;
930 
931 	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
932 	dst = acomp_ctx->buffer;
933 	sg_init_table(&input, 1);
934 	sg_set_page(&input, page, PAGE_SIZE, 0);
935 
936 	/*
937 	 * We need PAGE_SIZE * 2 here since the data may expand on compression,
938 	 * and hardware accelerators may not check the dst buffer size, so give
939 	 * the dst buffer enough length to avoid a buffer overflow.
940 	 */
941 	sg_init_one(&output, dst, PAGE_SIZE * 2);
942 	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
943 
944 	/*
945 	 * It may look a little silly that we send an asynchronous request and
946 	 * then wait for its completion synchronously; in effect, the operation
947 	 * is synchronous.
948 	 * Theoretically, acomp lets users submit multiple requests on one acomp
949 	 * instance and have them completed concurrently, but zswap stores and
950 	 * loads page by page, so a single thread doing zswap has no way to
951 	 * submit a second page before the first one is done.
952 	 * However, different threads running on different CPUs use different
953 	 * acomp instances, so multiple threads can still do (de)compression in
954 	 * parallel.
955 	 */
956 	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
957 	dlen = acomp_ctx->req->dlen;
958 	if (comp_ret)
959 		goto unlock;
960 
961 	zpool = pool->zpool;
962 	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
963 	if (zpool_malloc_support_movable(zpool))
964 		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
965 	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
966 	if (alloc_ret)
967 		goto unlock;
968 
969 	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
970 	memcpy(buf, dst, dlen);
971 	zpool_unmap_handle(zpool, handle);
972 
973 	entry->handle = handle;
974 	entry->length = dlen;
975 
976 unlock:
977 	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
978 		zswap_reject_compress_poor++;
979 	else if (comp_ret)
980 		zswap_reject_compress_fail++;
981 	else if (alloc_ret)
982 		zswap_reject_alloc_fail++;
983 
984 	acomp_ctx_put_unlock(acomp_ctx);
985 	return comp_ret == 0 && alloc_ret == 0;
986 }
987 
988 static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
989 {
990 	struct zpool *zpool = entry->pool->zpool;
991 	struct scatterlist input, output;
992 	struct crypto_acomp_ctx *acomp_ctx;
993 	u8 *src;
994 
995 	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
996 	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
997 	/*
998 	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
999 	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
1000 	 * resort to copying the buffer to a temporary one.
1001 	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
1002 	 * such as a kmap address of high memory or even a vmap address.
1003 	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
1004 	 * In such cases, we also must copy the buffer to a temporary, lowmem one.
1005 	 */
1006 	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
1007 	    !virt_addr_valid(src)) {
1008 		memcpy(acomp_ctx->buffer, src, entry->length);
1009 		src = acomp_ctx->buffer;
1010 		zpool_unmap_handle(zpool, entry->handle);
1011 	}
1012 
1013 	sg_init_one(&input, src, entry->length);
1014 	sg_init_table(&output, 1);
1015 	sg_set_folio(&output, folio, PAGE_SIZE, 0);
1016 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1017 	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1018 	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1019 
1020 	if (src != acomp_ctx->buffer)
1021 		zpool_unmap_handle(zpool, entry->handle);
1022 	acomp_ctx_put_unlock(acomp_ctx);
1023 }
1024 
1025 /*********************************
1026 * writeback code
1027 **********************************/
1028 /*
1029  * Attempts to free an entry by adding a folio to the swap cache,
1030  * decompressing the entry data into the folio, and issuing a
1031  * bio write to write the folio back to the swap device.
1032  *
1033  * This can be thought of as a "resumed writeback" of the folio
1034  * to the swap device.  We are basically resuming the same swap
1035  * writeback path that was intercepted with the zswap_store()
1036  * in the first place.  After the folio has been decompressed into
1037  * the swap cache, the compressed version stored by zswap can be
1038  * freed.
1039  */
1040 static int zswap_writeback_entry(struct zswap_entry *entry,
1041 				 swp_entry_t swpentry)
1042 {
1043 	struct xarray *tree;
1044 	pgoff_t offset = swp_offset(swpentry);
1045 	struct folio *folio;
1046 	struct mempolicy *mpol;
1047 	bool folio_was_allocated;
1048 	struct writeback_control wbc = {
1049 		.sync_mode = WB_SYNC_NONE,
1050 	};
1051 
1052 	/* try to allocate swap cache folio */
1053 	mpol = get_task_policy(current);
1054 	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1055 				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1056 	if (!folio)
1057 		return -ENOMEM;
1058 
1059 	/*
1060 	 * Found an existing folio: we raced with swapin or a concurrent
1061 	 * shrinker. We generally write back cold folios from zswap, and
1062 	 * swapin means the folio just became hot, so skip this folio.
1063 	 * For the unlikely concurrent shrinker case, it will be unlinked
1064 	 * and freed when invalidated by the concurrent shrinker anyway.
1065 	 */
1066 	if (!folio_was_allocated) {
1067 		folio_put(folio);
1068 		return -EEXIST;
1069 	}
1070 
1071 	/*
1072 	 * folio is locked, and the swapcache is now secured against
1073 	 * concurrent swapping to and from the slot, and concurrent
1074 	 * swapoff so we can safely dereference the zswap tree here.
1075 	 * Verify that the swap entry hasn't been invalidated and recycled
1076 	 * behind our backs, to avoid overwriting a new swap folio with
1077 	 * old compressed data. Only when this is successful can the entry
1078 	 * be dereferenced.
1079 	 */
1080 	tree = swap_zswap_tree(swpentry);
1081 	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
1082 		delete_from_swap_cache(folio);
1083 		folio_unlock(folio);
1084 		folio_put(folio);
1085 		return -ENOMEM;
1086 	}
1087 
1088 	zswap_decompress(entry, folio);
1089 
1090 	count_vm_event(ZSWPWB);
1091 	if (entry->objcg)
1092 		count_objcg_events(entry->objcg, ZSWPWB, 1);
1093 
1094 	zswap_entry_free(entry);
1095 
1096 	/* folio is up to date */
1097 	folio_mark_uptodate(folio);
1098 
1099 	/* move it to the tail of the inactive list after end_writeback */
1100 	folio_set_reclaim(folio);
1101 
1102 	/* start writeback */
1103 	__swap_writepage(folio, &wbc);
1104 	folio_put(folio);
1105 
1106 	return 0;
1107 }
1108 
1109 /*********************************
1110 * shrinker functions
1111 **********************************/
1112 /*
1113  * The dynamic shrinker is modulated by the following factors:
1114  *
1115  * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
1116  *    the entry a second chance) before rotating it in the LRU list. If the
1117  *    entry is considered again by the shrinker, with its referenced bit unset,
1118  *    it is written back. The writeback rate as a result is dynamically
1119  *    adjusted by the pool activities - if the pool is dominated by new entries
1120 	 *    (i.e. lots of recent zswapouts), these entries will be protected and
1121  *    the writeback rate will slow down. On the other hand, if the pool has a
1122  *    lot of stagnant entries, these entries will be reclaimed immediately,
1123  *    effectively increasing the writeback rate.
1124  *
1125  * 2. Swapins counter: If we observe swapins, it is a sign that we are
1126  *    overshrinking and should slow down. We maintain a swapins counter, which
1127 	 *    is consumed and subtracted from the number of eligible objects on the LRU
1128  *    in zswap_shrinker_count().
1129  *
1130  * 3. Compression ratio. The better the workload compresses, the less gains we
1131  *    can expect from writeback. We scale down the number of objects available
1132  *    for reclaim by this ratio.
1133  */
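
/*
 * Illustrative walk-through of the second-chance scheme above (added
 * commentary, not part of the original source): a freshly stored entry has
 * referenced == true. The first time the shrinker reaches it, the bit is
 * cleared and the entry is only rotated in the LRU; if the shrinker reaches
 * it again with the bit still clear, the entry is written back to the
 * backing swap device.
 */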
1134 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1135 				       void *arg)
1136 {
1137 	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1138 	bool *encountered_page_in_swapcache = (bool *)arg;
1139 	swp_entry_t swpentry;
1140 	enum lru_status ret = LRU_REMOVED_RETRY;
1141 	int writeback_result;
1142 
1143 	/*
1144 	 * Second chance algorithm: if the entry has its referenced bit set, give it
1145 	 * a second chance. Only clear the referenced bit and rotate it in the
1146 	 * a second chance. Only clear the referenced bit and rotate it in
1147 	 * zswap's LRU list.
1148 	if (entry->referenced) {
1149 		entry->referenced = false;
1150 		return LRU_ROTATE;
1151 	}
1152 
1153 	/*
1154 	 * As soon as we drop the LRU lock, the entry can be freed by
1155 	 * a concurrent invalidation. This means the following:
1156 	 *
1157 	 * 1. We extract the swp_entry_t to the stack, allowing
1158 	 *    zswap_writeback_entry() to pin the swap entry and
1159 	 *    then validate the zswap entry against that swap entry's
1160 	 *    tree using pointer value comparison. Only when that
1161 	 *    is successful can the entry be dereferenced.
1162 	 *
1163 	 * 2. Usually, objects are taken off the LRU for reclaim. In
1164 	 *    this case this isn't possible, because if reclaim fails
1165 	 *    for whatever reason, we have no means of knowing if the
1166 	 *    entry is alive to put it back on the LRU.
1167 	 *
1168 	 *    So rotate it before dropping the lock. If the entry is
1169 	 *    written back or invalidated, the free path will unlink
1170 	 *    it. For failures, rotation is the right thing as well.
1171 	 *
1172 	 *    Temporary failures, where the same entry should be tried
1173 	 *    again immediately, almost never happen for this shrinker.
1174 	 *    We don't do any trylocking; -ENOMEM comes closest,
1175 	 *    but that's extremely rare and doesn't happen spuriously
1176 	 *    either. Don't bother distinguishing this case.
1177 	 */
1178 	list_move_tail(item, &l->list);
1179 
1180 	/*
1181 	 * Once the lru lock is dropped, the entry might get freed. The
1182 	 * swpentry is copied to the stack, and entry isn't deref'd again
1183 	 * until the entry is verified to still be alive in the tree.
1184 	 */
1185 	swpentry = entry->swpentry;
1186 
1187 	/*
1188 	 * It's safe to drop the lock here because we return either
1189 	 * LRU_REMOVED_RETRY or LRU_RETRY.
1190 	 */
1191 	spin_unlock(&l->lock);
1192 
1193 	writeback_result = zswap_writeback_entry(entry, swpentry);
1194 
1195 	if (writeback_result) {
1196 		zswap_reject_reclaim_fail++;
1197 		ret = LRU_RETRY;
1198 
1199 		/*
1200 		 * Encountering a page already in swap cache is a sign that we are shrinking
1201 		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1202 		 * shrinker context).
1203 		 */
1204 		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1205 			ret = LRU_STOP;
1206 			*encountered_page_in_swapcache = true;
1207 		}
1208 	} else {
1209 		zswap_written_back_pages++;
1210 	}
1211 
1212 	return ret;
1213 }
1214 
1215 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1216 		struct shrink_control *sc)
1217 {
1218 	unsigned long shrink_ret;
1219 	bool encountered_page_in_swapcache = false;
1220 
1221 	if (!zswap_shrinker_enabled ||
1222 			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1223 		sc->nr_scanned = 0;
1224 		return SHRINK_STOP;
1225 	}
1226 
1227 	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1228 		&encountered_page_in_swapcache);
1229 
1230 	if (encountered_page_in_swapcache)
1231 		return SHRINK_STOP;
1232 
1233 	return shrink_ret ? shrink_ret : SHRINK_STOP;
1234 }
1235 
1236 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1237 		struct shrink_control *sc)
1238 {
1239 	struct mem_cgroup *memcg = sc->memcg;
1240 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1241 	atomic_long_t *nr_disk_swapins =
1242 		&lruvec->zswap_lruvec_state.nr_disk_swapins;
1243 	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
1244 		nr_remain;
1245 
1246 	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1247 		return 0;
1248 
1249 	/*
1250 	 * The shrinker resumes swap writeback, which will enter block
1251 	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1252 	 * rules (may_enter_fs()), which apply on a per-folio basis.
1253 	 */
1254 	if (!gfp_has_io_fs(sc->gfp_mask))
1255 		return 0;
1256 
1257 	/*
1258 	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1259 	 * have them per-node and thus per-lruvec. Careful if memcg is
1260 	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1261 	 * for the lruvec, but not for memcg_page_state().
1262 	 *
1263 	 * Without memcg, use the zswap pool-wide metrics.
1264 	 */
1265 	if (!mem_cgroup_disabled()) {
1266 		mem_cgroup_flush_stats(memcg);
1267 		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1268 		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1269 	} else {
1270 		nr_backing = zswap_total_pages();
1271 		nr_stored = atomic_long_read(&zswap_stored_pages);
1272 	}
1273 
1274 	if (!nr_stored)
1275 		return 0;
1276 
1277 	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1278 	if (!nr_freeable)
1279 		return 0;
1280 
1281 	/*
1282 	 * Subtract from the lru size the number of pages that were recently swapped
1283 	 * in from disk. The idea is that had we protected zswap's LRU by this
1284 	 * number of pages, these disk swapins would not have happened.
1285 	 */
1286 	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
1287 	do {
1288 		if (nr_freeable >= nr_disk_swapins_cur)
1289 			nr_remain = 0;
1290 		else
1291 			nr_remain = nr_disk_swapins_cur - nr_freeable;
1292 	} while (!atomic_long_try_cmpxchg(
1293 		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));
1294 
1295 	nr_freeable -= nr_disk_swapins_cur - nr_remain;
1296 	if (!nr_freeable)
1297 		return 0;
1298 
1299 	/*
1300 	 * Scale the number of freeable pages by the memory saving factor.
1301 	 * This ensures that the better zswap compresses memory, the fewer
1302 	 * pages we will evict to swap (as it will otherwise incur IO for
1303 	 * relatively small memory saving).
1304 	 */
1305 	return mult_frac(nr_freeable, nr_backing, nr_stored);
1306 }
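
/*
 * Illustrative example (added commentary, not part of the original source):
 * if nr_freeable is 1000, nr_backing is 250 (pages of compressed backing
 * memory) and nr_stored is 1000 (stored pages), the pool compresses roughly
 * 4:1 and zswap_shrinker_count() reports mult_frac(1000, 250, 1000) = 250
 * freeable objects, since writeback would free relatively little memory.
 */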
1307 
1308 static struct shrinker *zswap_alloc_shrinker(void)
1309 {
1310 	struct shrinker *shrinker;
1311 
1312 	shrinker =
1313 		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1314 	if (!shrinker)
1315 		return NULL;
1316 
1317 	shrinker->scan_objects = zswap_shrinker_scan;
1318 	shrinker->count_objects = zswap_shrinker_count;
1319 	shrinker->batch = 0;
1320 	shrinker->seeks = DEFAULT_SEEKS;
1321 	return shrinker;
1322 }
1323 
1324 static int shrink_memcg(struct mem_cgroup *memcg)
1325 {
1326 	int nid, shrunk = 0, scanned = 0;
1327 
1328 	if (!mem_cgroup_zswap_writeback_enabled(memcg))
1329 		return -ENOENT;
1330 
1331 	/*
1332 	 * Skip zombies because their LRUs are reparented and we would be
1333 	 * reclaiming from the parent instead of the dead memcg.
1334 	 */
1335 	if (memcg && !mem_cgroup_online(memcg))
1336 		return -ENOENT;
1337 
1338 	for_each_node_state(nid, N_NORMAL_MEMORY) {
1339 		unsigned long nr_to_walk = 1;
1340 
1341 		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1342 					    &shrink_memcg_cb, NULL, &nr_to_walk);
1343 		scanned += 1 - nr_to_walk;
1344 	}
1345 
1346 	if (!scanned)
1347 		return -ENOENT;
1348 
1349 	return shrunk ? 0 : -EAGAIN;
1350 }
1351 
1352 static void shrink_worker(struct work_struct *w)
1353 {
1354 	struct mem_cgroup *memcg;
1355 	int ret, failures = 0, attempts = 0;
1356 	unsigned long thr;
1357 
1358 	/* Reclaim down to the accept threshold */
1359 	thr = zswap_accept_thr_pages();
1360 
1361 	/*
1362 	 * Global reclaim will select a cgroup in a round-robin fashion from all
1363 	 * online memcgs, but memcgs that have no pages in zswap and
1364 	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
1365 	 * candidates for shrinking.
1366 	 *
1367 	 * Shrinking will be aborted if we encounter the following
1368 	 * MAX_RECLAIM_RETRIES times:
1369 	 * - No writeback-candidate memcgs found in a memcg tree walk.
1370 	 * - Shrinking a writeback-candidate memcg failed.
1371 	 *
1372 	 * We save iteration cursor memcg into zswap_next_shrink,
1373 	 * which can be modified by the offline memcg cleaner
1374 	 * zswap_memcg_offline_cleanup().
1375 	 *
1376 	 * Since the offline cleaner is called only once, we cannot leave an
1377 	 * offline memcg reference in zswap_next_shrink.
1378 	 * We can rely on the cleaner only if we get online memcg under lock.
1379 	 *
1380 	 * If we get an offline memcg, we cannot determine if the cleaner has
1381 	 * already been called or will be called later. We must put back the
1382 	 * reference before returning from this function. Otherwise, the
1383 	 * offline memcg left in zswap_next_shrink will hold the reference
1384 	 * until the next run of shrink_worker().
1385 	 */
1386 	do {
1387 		/*
1388 		 * Start shrinking from the next memcg after zswap_next_shrink.
1389 		 * When the offline cleaner has already advanced the cursor,
1390 		 * advancing the cursor here overlooks one memcg, but this
1391 		 * should be negligibly rare.
1392 		 *
1393 		 * If we get an online memcg, keep the extra reference in case
1394 		 * the original one obtained by mem_cgroup_iter() is dropped by
1395 		 * zswap_memcg_offline_cleanup() while we are shrinking the
1396 		 * memcg.
1397 		 */
1398 		spin_lock(&zswap_shrink_lock);
1399 		do {
1400 			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1401 			zswap_next_shrink = memcg;
1402 		} while (memcg && !mem_cgroup_tryget_online(memcg));
1403 		spin_unlock(&zswap_shrink_lock);
1404 
1405 		if (!memcg) {
1406 			/*
1407 			 * Continue shrinking without incrementing failures if
1408 			 * we found candidate memcgs in the last tree walk.
1409 			 */
1410 			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
1411 				break;
1412 
1413 			attempts = 0;
1414 			goto resched;
1415 		}
1416 
1417 		ret = shrink_memcg(memcg);
1418 		/* drop the extra reference */
1419 		mem_cgroup_put(memcg);
1420 
1421 		/*
1422 		 * There are no writeback-candidate pages in the memcg.
1423 		 * This is not an issue as long as we can find another memcg
1424 		 * with pages in zswap. Skip this without incrementing attempts
1425 		 * and failures.
1426 		 */
1427 		if (ret == -ENOENT)
1428 			continue;
1429 		++attempts;
1430 
1431 		if (ret && ++failures == MAX_RECLAIM_RETRIES)
1432 			break;
1433 resched:
1434 		cond_resched();
1435 	} while (zswap_total_pages() > thr);
1436 }
1437 
1438 /*********************************
1439 * main API
1440 **********************************/
1441 
1442 static ssize_t zswap_store_page(struct page *page,
1443 				struct obj_cgroup *objcg,
1444 				struct zswap_pool *pool)
1445 {
1446 	swp_entry_t page_swpentry = page_swap_entry(page);
1447 	struct zswap_entry *entry, *old;
1448 
1449 	/* allocate entry */
1450 	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
1451 	if (!entry) {
1452 		zswap_reject_kmemcache_fail++;
1453 		return -EINVAL;
1454 	}
1455 
1456 	if (!zswap_compress(page, entry, pool))
1457 		goto compress_failed;
1458 
1459 	old = xa_store(swap_zswap_tree(page_swpentry),
1460 		       swp_offset(page_swpentry),
1461 		       entry, GFP_KERNEL);
1462 	if (xa_is_err(old)) {
1463 		int err = xa_err(old);
1464 
1465 		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
1466 		zswap_reject_alloc_fail++;
1467 		goto store_failed;
1468 	}
1469 
1470 	/*
1471 	 * We may have had an existing entry that became stale when
1472 	 * the folio was redirtied and now the new version is being
1473 	 * swapped out. Get rid of the old.
1474 	 */
1475 	if (old)
1476 		zswap_entry_free(old);
1477 
1478 	/*
1479 	 * The entry is successfully compressed and stored in the tree; there is
1480 	 * no further possibility of failure. Grab refs to the pool and objcg.
1481 	 * These refs will be dropped by zswap_entry_free() when the entry is
1482 	 * removed from the tree.
1483 	 */
1484 	zswap_pool_get(pool);
1485 	if (objcg)
1486 		obj_cgroup_get(objcg);
1487 
1488 	/*
1489 	 * We finish initializing the entry while it's already in xarray.
1490 	 * This is safe because:
1491 	 *
1492 	 * 1. Concurrent stores and invalidations are excluded by folio lock.
1493 	 *
1494 	 * 2. Writeback is excluded by the entry not being on the LRU yet.
1495 	 *    The publishing order matters to prevent writeback from seeing
1496 	 *    an incoherent entry.
1497 	 */
1498 	entry->pool = pool;
1499 	entry->swpentry = page_swpentry;
1500 	entry->objcg = objcg;
1501 	entry->referenced = true;
1502 	if (entry->length) {
1503 		INIT_LIST_HEAD(&entry->lru);
1504 		zswap_lru_add(&zswap_list_lru, entry);
1505 	}
1506 
1507 	return entry->length;
1508 
1509 store_failed:
1510 	zpool_free(pool->zpool, entry->handle);
1511 compress_failed:
1512 	zswap_entry_cache_free(entry);
1513 	return -EINVAL;
1514 }
1515 
1516 bool zswap_store(struct folio *folio)
1517 {
1518 	long nr_pages = folio_nr_pages(folio);
1519 	swp_entry_t swp = folio->swap;
1520 	struct obj_cgroup *objcg = NULL;
1521 	struct mem_cgroup *memcg = NULL;
1522 	struct zswap_pool *pool;
1523 	size_t compressed_bytes = 0;
1524 	bool ret = false;
1525 	long index;
1526 
1527 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1528 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1529 
1530 	if (!zswap_enabled)
1531 		goto check_old;
1532 
1533 	objcg = get_obj_cgroup_from_folio(folio);
1534 	if (objcg && !obj_cgroup_may_zswap(objcg)) {
1535 		memcg = get_mem_cgroup_from_objcg(objcg);
1536 		if (shrink_memcg(memcg)) {
1537 			mem_cgroup_put(memcg);
1538 			goto put_objcg;
1539 		}
1540 		mem_cgroup_put(memcg);
1541 	}
1542 
1543 	if (zswap_check_limits())
1544 		goto put_objcg;
1545 
1546 	pool = zswap_pool_current_get();
1547 	if (!pool)
1548 		goto put_objcg;
1549 
1550 	if (objcg) {
1551 		memcg = get_mem_cgroup_from_objcg(objcg);
1552 		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1553 			mem_cgroup_put(memcg);
1554 			goto put_pool;
1555 		}
1556 		mem_cgroup_put(memcg);
1557 	}
1558 
1559 	for (index = 0; index < nr_pages; ++index) {
1560 		struct page *page = folio_page(folio, index);
1561 		ssize_t bytes;
1562 
1563 		bytes = zswap_store_page(page, objcg, pool);
1564 		if (bytes < 0)
1565 			goto put_pool;
1566 		compressed_bytes += bytes;
1567 	}
1568 
1569 	if (objcg) {
1570 		obj_cgroup_charge_zswap(objcg, compressed_bytes);
1571 		count_objcg_events(objcg, ZSWPOUT, nr_pages);
1572 	}
1573 
1574 	atomic_long_add(nr_pages, &zswap_stored_pages);
1575 	count_vm_events(ZSWPOUT, nr_pages);
1576 
1577 	ret = true;
1578 
1579 put_pool:
1580 	zswap_pool_put(pool);
1581 put_objcg:
1582 	obj_cgroup_put(objcg);
1583 	if (!ret && zswap_pool_reached_full)
1584 		queue_work(shrink_wq, &zswap_shrink_work);
1585 check_old:
1586 	/*
1587 	 * If the zswap store fails or zswap is disabled, we must invalidate
1588 	 * the possibly stale entries which were previously stored at the
1589 	 * offsets corresponding to each page of the folio. Otherwise,
1590 	 * writeback could overwrite the new data in the swapfile.
1591 	 */
1592 	if (!ret) {
1593 		unsigned type = swp_type(swp);
1594 		pgoff_t offset = swp_offset(swp);
1595 		struct zswap_entry *entry;
1596 		struct xarray *tree;
1597 
1598 		for (index = 0; index < nr_pages; ++index) {
1599 			tree = swap_zswap_tree(swp_entry(type, offset + index));
1600 			entry = xa_erase(tree, offset + index);
1601 			if (entry)
1602 				zswap_entry_free(entry);
1603 		}
1604 	}
1605 
1606 	return ret;
1607 }
1608 
1609 bool zswap_load(struct folio *folio)
1610 {
1611 	swp_entry_t swp = folio->swap;
1612 	pgoff_t offset = swp_offset(swp);
1613 	bool swapcache = folio_test_swapcache(folio);
1614 	struct xarray *tree = swap_zswap_tree(swp);
1615 	struct zswap_entry *entry;
1616 
1617 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
1618 
1619 	if (zswap_never_enabled())
1620 		return false;
1621 
1622 	/*
1623 	 * Large folios should not be swapped in while zswap is being used, as
1624 	 * they are not properly handled. Zswap does not properly load large
1625 	 * folios, and a large folio may only be partially in zswap.
1626 	 *
1627 	 * Return true without marking the folio uptodate so that an IO error is
1628 	 * emitted (e.g. do_swap_page() will sigbus).
1629 	 */
1630 	if (WARN_ON_ONCE(folio_test_large(folio)))
1631 		return true;
1632 
1633 	/*
1634 	 * When reading into the swapcache, invalidate our entry. The
1635 	 * swapcache can be the authoritative owner of the page and
1636 	 * its mappings, and the pressure that results from having two
1637 	 * in-memory copies outweighs any benefits of caching the
1638 	 * compression work.
1639 	 *
1640 	 * (Most swapins go through the swapcache. The notable
1641 	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
1642 	 * files, which reads into a private page and may free it if
1643 	 * the fault fails. We remain the primary owner of the entry.)
1644 	 */
1645 	if (swapcache)
1646 		entry = xa_erase(tree, offset);
1647 	else
1648 		entry = xa_load(tree, offset);
1649 
1650 	if (!entry)
1651 		return false;
1652 
1653 	zswap_decompress(entry, folio);
1654 
1655 	count_vm_event(ZSWPIN);
1656 	if (entry->objcg)
1657 		count_objcg_events(entry->objcg, ZSWPIN, 1);
1658 
1659 	if (swapcache) {
1660 		zswap_entry_free(entry);
1661 		folio_mark_dirty(folio);
1662 	}
1663 
1664 	folio_mark_uptodate(folio);
1665 	return true;
1666 }
1667 
1668 void zswap_invalidate(swp_entry_t swp)
1669 {
1670 	pgoff_t offset = swp_offset(swp);
1671 	struct xarray *tree = swap_zswap_tree(swp);
1672 	struct zswap_entry *entry;
1673 
1674 	if (xa_empty(tree))
1675 		return;
1676 
1677 	entry = xa_erase(tree, offset);
1678 	if (entry)
1679 		zswap_entry_free(entry);
1680 }
1681 
1682 int zswap_swapon(int type, unsigned long nr_pages)
1683 {
1684 	struct xarray *trees, *tree;
1685 	unsigned int nr, i;
1686 
1687 	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1688 	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1689 	if (!trees) {
1690 		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1691 		return -ENOMEM;
1692 	}
1693 
1694 	for (i = 0; i < nr; i++)
1695 		xa_init(trees + i);
1696 
1697 	nr_zswap_trees[type] = nr;
1698 	zswap_trees[type] = trees;
1699 	return 0;
1700 }
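
/*
 * Illustrative example (added commentary, not part of the original source):
 * assuming SWAP_ADDRESS_SPACE_PAGES is 16384, a 1 GiB swap device with
 * 262,144 4 KiB slots gets DIV_ROUND_UP(262144, 16384) = 16 xarrays, each
 * covering a 64 MiB chunk of the swap offset space.
 */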
1701 
1702 void zswap_swapoff(int type)
1703 {
1704 	struct xarray *trees = zswap_trees[type];
1705 	unsigned int i;
1706 
1707 	if (!trees)
1708 		return;
1709 
1710 	/* try_to_unuse() invalidated all the entries already */
1711 	for (i = 0; i < nr_zswap_trees[type]; i++)
1712 		WARN_ON_ONCE(!xa_empty(trees + i));
1713 
1714 	kvfree(trees);
1715 	nr_zswap_trees[type] = 0;
1716 	zswap_trees[type] = NULL;
1717 }
1718 
1719 /*********************************
1720 * debugfs functions
1721 **********************************/
1722 #ifdef CONFIG_DEBUG_FS
1723 #include <linux/debugfs.h>
1724 
1725 static struct dentry *zswap_debugfs_root;
1726 
1727 static int debugfs_get_total_size(void *data, u64 *val)
1728 {
1729 	*val = zswap_total_pages() * PAGE_SIZE;
1730 	return 0;
1731 }
1732 DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
1733 
1734 static int debugfs_get_stored_pages(void *data, u64 *val)
1735 {
1736 	*val = atomic_long_read(&zswap_stored_pages);
1737 	return 0;
1738 }
1739 DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");
1740 
1741 static int zswap_debugfs_init(void)
1742 {
1743 	if (!debugfs_initialized())
1744 		return -ENODEV;
1745 
1746 	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1747 
1748 	debugfs_create_u64("pool_limit_hit", 0444,
1749 			   zswap_debugfs_root, &zswap_pool_limit_hit);
1750 	debugfs_create_u64("reject_reclaim_fail", 0444,
1751 			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
1752 	debugfs_create_u64("reject_alloc_fail", 0444,
1753 			   zswap_debugfs_root, &zswap_reject_alloc_fail);
1754 	debugfs_create_u64("reject_kmemcache_fail", 0444,
1755 			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1756 	debugfs_create_u64("reject_compress_fail", 0444,
1757 			   zswap_debugfs_root, &zswap_reject_compress_fail);
1758 	debugfs_create_u64("reject_compress_poor", 0444,
1759 			   zswap_debugfs_root, &zswap_reject_compress_poor);
1760 	debugfs_create_u64("written_back_pages", 0444,
1761 			   zswap_debugfs_root, &zswap_written_back_pages);
1762 	debugfs_create_file("pool_total_size", 0444,
1763 			    zswap_debugfs_root, NULL, &total_size_fops);
1764 	debugfs_create_file("stored_pages", 0444,
1765 			    zswap_debugfs_root, NULL, &stored_pages_fops);
1766 
1767 	return 0;
1768 }
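
/*
 * Illustrative usage note (added commentary, not part of the original
 * source): with CONFIG_DEBUG_FS enabled and debugfs mounted in its usual
 * location, the counters registered above can be read from
 * /sys/kernel/debug/zswap/, e.g.:
 *
 *   cat /sys/kernel/debug/zswap/pool_total_size
 *   cat /sys/kernel/debug/zswap/written_back_pages
 */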
1769 #else
1770 static int zswap_debugfs_init(void)
1771 {
1772 	return 0;
1773 }
1774 #endif
1775 
1776 /*********************************
1777 * module init and exit
1778 **********************************/
1779 static int zswap_setup(void)
1780 {
1781 	struct zswap_pool *pool;
1782 	int ret;
1783 
1784 	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1785 	if (!zswap_entry_cache) {
1786 		pr_err("entry cache creation failed\n");
1787 		goto cache_fail;
1788 	}
1789 
1790 	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1791 				      "mm/zswap_pool:prepare",
1792 				      zswap_cpu_comp_prepare,
1793 				      zswap_cpu_comp_dead);
1794 	if (ret)
1795 		goto hp_fail;
1796 
1797 	shrink_wq = alloc_workqueue("zswap-shrink",
1798 			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1799 	if (!shrink_wq)
1800 		goto shrink_wq_fail;
1801 
1802 	zswap_shrinker = zswap_alloc_shrinker();
1803 	if (!zswap_shrinker)
1804 		goto shrinker_fail;
1805 	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1806 		goto lru_fail;
1807 	shrinker_register(zswap_shrinker);
1808 
1809 	INIT_WORK(&zswap_shrink_work, shrink_worker);
1810 
1811 	pool = __zswap_pool_create_fallback();
1812 	if (pool) {
1813 		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1814 			zpool_get_type(pool->zpool));
1815 		list_add(&pool->list, &zswap_pools);
1816 		zswap_has_pool = true;
1817 		static_branch_enable(&zswap_ever_enabled);
1818 	} else {
1819 		pr_err("pool creation failed\n");
1820 		zswap_enabled = false;
1821 	}
1822 
1823 	if (zswap_debugfs_init())
1824 		pr_warn("debugfs initialization failed\n");
1825 	zswap_init_state = ZSWAP_INIT_SUCCEED;
1826 	return 0;
1827 
1828 lru_fail:
1829 	shrinker_free(zswap_shrinker);
1830 shrinker_fail:
1831 	destroy_workqueue(shrink_wq);
1832 shrink_wq_fail:
1833 	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1834 hp_fail:
1835 	kmem_cache_destroy(zswap_entry_cache);
1836 cache_fail:
1837 	/* if built-in, we aren't unloaded on failure; don't allow use */
1838 	zswap_init_state = ZSWAP_INIT_FAILED;
1839 	zswap_enabled = false;
1840 	return -ENOMEM;
1841 }
1842 
1843 static int __init zswap_init(void)
1844 {
1845 	if (!zswap_enabled)
1846 		return 0;
1847 	return zswap_setup();
1848 }
1849 /* must be late so crypto has time to come up */
1850 late_initcall(zswap_init);
1851 
1852 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1853 MODULE_DESCRIPTION("Compressed cache for swap pages");
1854