xref: /linux/mm/zswap.c (revision e6ebf01172185d74237193ca7bb6bdfc39f3eaeb)
1c942fddfSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
22b281117SSeth Jennings /*
32b281117SSeth Jennings  * zswap.c - zswap driver file
42b281117SSeth Jennings  *
542c06a0eSJohannes Weiner  * zswap is a cache that takes pages that are in the process
62b281117SSeth Jennings  * of being swapped out and attempts to compress and store them in a
72b281117SSeth Jennings  * RAM-based memory pool.  This can result in a significant I/O reduction on
82b281117SSeth Jennings  * the swap device and, in the case where decompressing from RAM is faster
92b281117SSeth Jennings  * than reading from the swap device, can also improve workload performance.
102b281117SSeth Jennings  *
112b281117SSeth Jennings  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
122b281117SSeth Jennings */
132b281117SSeth Jennings 
142b281117SSeth Jennings #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
152b281117SSeth Jennings 
162b281117SSeth Jennings #include <linux/module.h>
172b281117SSeth Jennings #include <linux/cpu.h>
182b281117SSeth Jennings #include <linux/highmem.h>
192b281117SSeth Jennings #include <linux/slab.h>
202b281117SSeth Jennings #include <linux/spinlock.h>
212b281117SSeth Jennings #include <linux/types.h>
222b281117SSeth Jennings #include <linux/atomic.h>
232b281117SSeth Jennings #include <linux/rbtree.h>
242b281117SSeth Jennings #include <linux/swap.h>
252b281117SSeth Jennings #include <linux/crypto.h>
261ec3b5feSBarry Song #include <linux/scatterlist.h>
27ddc1a5cbSHugh Dickins #include <linux/mempolicy.h>
282b281117SSeth Jennings #include <linux/mempool.h>
2912d79d64SDan Streetman #include <linux/zpool.h>
301ec3b5feSBarry Song #include <crypto/acompress.h>
3142c06a0eSJohannes Weiner #include <linux/zswap.h>
322b281117SSeth Jennings #include <linux/mm_types.h>
332b281117SSeth Jennings #include <linux/page-flags.h>
342b281117SSeth Jennings #include <linux/swapops.h>
352b281117SSeth Jennings #include <linux/writeback.h>
362b281117SSeth Jennings #include <linux/pagemap.h>
3745190f01SVitaly Wool #include <linux/workqueue.h>
38a65b0e76SDomenico Cerasuolo #include <linux/list_lru.h>
392b281117SSeth Jennings 
40014bb1deSNeilBrown #include "swap.h"
41e0228d59SDomenico Cerasuolo #include "internal.h"
42014bb1deSNeilBrown 
432b281117SSeth Jennings /*********************************
442b281117SSeth Jennings * statistics
452b281117SSeth Jennings **********************************/
4612d79d64SDan Streetman /* Total bytes used by the compressed storage */
47f6498b77SJohannes Weiner u64 zswap_pool_total_size;
482b281117SSeth Jennings /* The number of compressed pages currently stored in zswap */
49f6498b77SJohannes Weiner atomic_t zswap_stored_pages = ATOMIC_INIT(0);
50a85f878bSSrividya Desireddy /* The number of same-value filled pages currently stored in zswap */
51a85f878bSSrividya Desireddy static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
522b281117SSeth Jennings 
532b281117SSeth Jennings /*
542b281117SSeth Jennings  * The statistics below are not protected from concurrent access for
552b281117SSeth Jennings  * performance reasons, so they may not be 100% accurate.  However,
562b281117SSeth Jennings  * they do provide useful information on roughly how many times a
572b281117SSeth Jennings  * certain event is occurring.
582b281117SSeth Jennings */
592b281117SSeth Jennings 
602b281117SSeth Jennings /* Pool limit was hit (see zswap_max_pool_percent) */
612b281117SSeth Jennings static u64 zswap_pool_limit_hit;
622b281117SSeth Jennings /* Pages written back when pool limit was reached */
632b281117SSeth Jennings static u64 zswap_written_back_pages;
642b281117SSeth Jennings /* Store failed due to a reclaim failure after pool limit was reached */
652b281117SSeth Jennings static u64 zswap_reject_reclaim_fail;
66cb61dad8SNhat Pham /* Store failed due to compression algorithm failure */
67cb61dad8SNhat Pham static u64 zswap_reject_compress_fail;
682b281117SSeth Jennings /* Compressed page was too big for the allocator to (optimally) store */
692b281117SSeth Jennings static u64 zswap_reject_compress_poor;
702b281117SSeth Jennings /* Store failed because underlying allocator could not get memory */
712b281117SSeth Jennings static u64 zswap_reject_alloc_fail;
722b281117SSeth Jennings /* Store failed because the entry metadata could not be allocated (rare) */
732b281117SSeth Jennings static u64 zswap_reject_kmemcache_fail;
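
/*
 * Editorial note (not in the original source): these counters are
 * exported read-only through debugfs, typically under
 * /sys/kernel/debug/zswap/, by the debugfs setup code later in this
 * file.
 */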
742b281117SSeth Jennings 
7545190f01SVitaly Wool /* Shrinker work queue */
7645190f01SVitaly Wool static struct workqueue_struct *shrink_wq;
7745190f01SVitaly Wool /* Pool limit was hit; we need to calm down */
7845190f01SVitaly Wool static bool zswap_pool_reached_full;
7945190f01SVitaly Wool 
802b281117SSeth Jennings /*********************************
812b281117SSeth Jennings * tunables
822b281117SSeth Jennings **********************************/
83c00ed16aSDan Streetman 
84bae21db8SDan Streetman #define ZSWAP_PARAM_UNSET ""
85bae21db8SDan Streetman 
86141fdeecSLiu Shixin static int zswap_setup(void);
87141fdeecSLiu Shixin 
88bb8b93b5SMaciej S. Szmigiero /* Enable/disable zswap */
89bb8b93b5SMaciej S. Szmigiero static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
90d7b028f5SDan Streetman static int zswap_enabled_param_set(const char *,
91d7b028f5SDan Streetman 				   const struct kernel_param *);
9283aed6cdSJoe Perches static const struct kernel_param_ops zswap_enabled_param_ops = {
93d7b028f5SDan Streetman 	.set =		zswap_enabled_param_set,
94d7b028f5SDan Streetman 	.get =		param_get_bool,
95d7b028f5SDan Streetman };
96d7b028f5SDan Streetman module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
972b281117SSeth Jennings 
9890b0fc26SDan Streetman /* Crypto compressor to use */
99bb8b93b5SMaciej S. Szmigiero static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
10090b0fc26SDan Streetman static int zswap_compressor_param_set(const char *,
10190b0fc26SDan Streetman 				      const struct kernel_param *);
10283aed6cdSJoe Perches static const struct kernel_param_ops zswap_compressor_param_ops = {
10390b0fc26SDan Streetman 	.set =		zswap_compressor_param_set,
104c99b42c3SDan Streetman 	.get =		param_get_charp,
105c99b42c3SDan Streetman 	.free =		param_free_charp,
10690b0fc26SDan Streetman };
10790b0fc26SDan Streetman module_param_cb(compressor, &zswap_compressor_param_ops,
108c99b42c3SDan Streetman 		&zswap_compressor, 0644);
10990b0fc26SDan Streetman 
11090b0fc26SDan Streetman /* Compressed storage zpool to use */
111bb8b93b5SMaciej S. Szmigiero static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
11290b0fc26SDan Streetman static int zswap_zpool_param_set(const char *, const struct kernel_param *);
11383aed6cdSJoe Perches static const struct kernel_param_ops zswap_zpool_param_ops = {
11490b0fc26SDan Streetman 	.set =		zswap_zpool_param_set,
115c99b42c3SDan Streetman 	.get =		param_get_charp,
116c99b42c3SDan Streetman 	.free =		param_free_charp,
11790b0fc26SDan Streetman };
118c99b42c3SDan Streetman module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
1192b281117SSeth Jennings 
1202b281117SSeth Jennings /* The maximum percentage of memory that the compressed pool can occupy */
1212b281117SSeth Jennings static unsigned int zswap_max_pool_percent = 20;
12290b0fc26SDan Streetman module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
12360105e12SMinchan Kim 
12445190f01SVitaly Wool /* The threshold for accepting new pages after the max_pool_percent was hit */
12545190f01SVitaly Wool static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
12645190f01SVitaly Wool module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
12745190f01SVitaly Wool 		   uint, 0644);
12845190f01SVitaly Wool 
129cb325dddSMaciej S. Szmigiero /*
130cb325dddSMaciej S. Szmigiero  * Enable/disable handling same-value filled pages (enabled by default).
131cb325dddSMaciej S. Szmigiero  * If disabled, every page is considered non-same-value filled.
132cb325dddSMaciej S. Szmigiero  */
133a85f878bSSrividya Desireddy static bool zswap_same_filled_pages_enabled = true;
134a85f878bSSrividya Desireddy module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
135a85f878bSSrividya Desireddy 		   bool, 0644);
136a85f878bSSrividya Desireddy 
137cb325dddSMaciej S. Szmigiero /* Enable/disable handling non-same-value filled pages (enabled by default) */
138cb325dddSMaciej S. Szmigiero static bool zswap_non_same_filled_pages_enabled = true;
139cb325dddSMaciej S. Szmigiero module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
140cb325dddSMaciej S. Szmigiero 		   bool, 0644);
141cb325dddSMaciej S. Szmigiero 
142b8cf32dcSYosry Ahmed /* Number of zpools in zswap_pool (empirically determined for scalability) */
143b8cf32dcSYosry Ahmed #define ZSWAP_NR_ZPOOLS 32
144b8cf32dcSYosry Ahmed 
145b5ba474fSNhat Pham /* Enable/disable memory pressure-based shrinker. */
146b5ba474fSNhat Pham static bool zswap_shrinker_enabled = IS_ENABLED(
147b5ba474fSNhat Pham 		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
148b5ba474fSNhat Pham module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
149b5ba474fSNhat Pham 
150501a06feSNhat Pham bool is_zswap_enabled(void)
151501a06feSNhat Pham {
152501a06feSNhat Pham 	return zswap_enabled;
153501a06feSNhat Pham }
154501a06feSNhat Pham 
1552b281117SSeth Jennings /*********************************
1562b281117SSeth Jennings * data structures
1572b281117SSeth Jennings **********************************/
158f1c54846SDan Streetman 
1591ec3b5feSBarry Song struct crypto_acomp_ctx {
1601ec3b5feSBarry Song 	struct crypto_acomp *acomp;
1611ec3b5feSBarry Song 	struct acomp_req *req;
1621ec3b5feSBarry Song 	struct crypto_wait wait;
1638ba2f844SChengming Zhou 	u8 *buffer;
1648ba2f844SChengming Zhou 	struct mutex mutex;
165270700ddSBarry Song 	bool is_sleepable;
1661ec3b5feSBarry Song };
1671ec3b5feSBarry Song 
168f999f38bSDomenico Cerasuolo /*
169f999f38bSDomenico Cerasuolo  * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
170f999f38bSDomenico Cerasuolo  * The only case where lru_lock is not acquired while holding tree.lock is
171f999f38bSDomenico Cerasuolo  * when a zswap_entry is taken off the lru for writeback; in that case it
172f999f38bSDomenico Cerasuolo  * needs to be verified that it's still valid in the tree.
173f999f38bSDomenico Cerasuolo  */
174f1c54846SDan Streetman struct zswap_pool {
175b8cf32dcSYosry Ahmed 	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
1761ec3b5feSBarry Song 	struct crypto_acomp_ctx __percpu *acomp_ctx;
17794ace3feSChengming Zhou 	struct percpu_ref ref;
178f1c54846SDan Streetman 	struct list_head list;
17945190f01SVitaly Wool 	struct work_struct release_work;
180cab7a7e5SSebastian Andrzej Siewior 	struct hlist_node node;
181f1c54846SDan Streetman 	char tfm_name[CRYPTO_MAX_ALG_NAME];
182f1c54846SDan Streetman };
183f1c54846SDan Streetman 
184e35606e4SChengming Zhou /* Global LRU lists shared by all zswap pools. */
185e35606e4SChengming Zhou static struct list_lru zswap_list_lru;
186e35606e4SChengming Zhou /* counter of pages stored in all zswap pools. */
187e35606e4SChengming Zhou static atomic_t zswap_nr_stored = ATOMIC_INIT(0);
188e35606e4SChengming Zhou 
189e35606e4SChengming Zhou /* The lock protects zswap_next_shrink updates. */
190e35606e4SChengming Zhou static DEFINE_SPINLOCK(zswap_shrink_lock);
191e35606e4SChengming Zhou static struct mem_cgroup *zswap_next_shrink;
192e35606e4SChengming Zhou static struct work_struct zswap_shrink_work;
193e35606e4SChengming Zhou static struct shrinker *zswap_shrinker;
194bf9b7df2SChengming Zhou 
1952b281117SSeth Jennings /*
1962b281117SSeth Jennings  * struct zswap_entry
1972b281117SSeth Jennings  *
1982b281117SSeth Jennings  * This structure contains the metadata for tracking a single compressed
1992b281117SSeth Jennings  * page within zswap.
2002b281117SSeth Jennings  *
2012b281117SSeth Jennings  * rbnode - links the entry into the red-black tree for the appropriate swap type
20297157d89SXiu Jianfeng  * swpentry - associated swap entry, the offset indexes into the red-black tree
2032b281117SSeth Jennings  * length - the length in bytes of the compressed page data.  Needed during
204f999f38bSDomenico Cerasuolo  *          decompression. For a same-value filled page, length is 0, and both
205f999f38bSDomenico Cerasuolo  *          pool and lru are invalid and must be ignored.
206f1c54846SDan Streetman  * pool - the zswap_pool the entry's data is in
207f1c54846SDan Streetman  * handle - zpool allocation handle that stores the compressed page data
208a85f878bSSrividya Desireddy  * value - value of the same-value filled page; every word in the page has this content
20997157d89SXiu Jianfeng  * objcg - the obj_cgroup that the compressed memory is charged to
210f999f38bSDomenico Cerasuolo  * lru - handle to the pool's lru used to evict pages.
2112b281117SSeth Jennings  */
2122b281117SSeth Jennings struct zswap_entry {
2132b281117SSeth Jennings 	struct rb_node rbnode;
2140bb48849SDomenico Cerasuolo 	swp_entry_t swpentry;
2152b281117SSeth Jennings 	unsigned int length;
216f1c54846SDan Streetman 	struct zswap_pool *pool;
217a85f878bSSrividya Desireddy 	union {
2182b281117SSeth Jennings 		unsigned long handle;
219a85f878bSSrividya Desireddy 		unsigned long value;
220a85f878bSSrividya Desireddy 	};
221f4840ccfSJohannes Weiner 	struct obj_cgroup *objcg;
222f999f38bSDomenico Cerasuolo 	struct list_head lru;
2232b281117SSeth Jennings };
2242b281117SSeth Jennings 
2252b281117SSeth Jennings struct zswap_tree {
2262b281117SSeth Jennings 	struct rb_root rbroot;
2272b281117SSeth Jennings 	spinlock_t lock;
2282b281117SSeth Jennings };
2292b281117SSeth Jennings 
2302b281117SSeth Jennings static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
23144c7c734SChengming Zhou static unsigned int nr_zswap_trees[MAX_SWAPFILES];
2322b281117SSeth Jennings 
233f1c54846SDan Streetman /* RCU-protected iteration */
234f1c54846SDan Streetman static LIST_HEAD(zswap_pools);
235f1c54846SDan Streetman /* protects zswap_pools list modification */
236f1c54846SDan Streetman static DEFINE_SPINLOCK(zswap_pools_lock);
23732a4e169SDan Streetman /* pool counter to provide unique names to zpool */
23832a4e169SDan Streetman static atomic_t zswap_pools_count = ATOMIC_INIT(0);
239f1c54846SDan Streetman 
2409021ccecSLiu Shixin enum zswap_init_type {
2419021ccecSLiu Shixin 	ZSWAP_UNINIT,
2429021ccecSLiu Shixin 	ZSWAP_INIT_SUCCEED,
2439021ccecSLiu Shixin 	ZSWAP_INIT_FAILED
2449021ccecSLiu Shixin };
24590b0fc26SDan Streetman 
2469021ccecSLiu Shixin static enum zswap_init_type zswap_init_state;
247d7b028f5SDan Streetman 
248141fdeecSLiu Shixin /* used to ensure the integrity of initialization */
249141fdeecSLiu Shixin static DEFINE_MUTEX(zswap_init_lock);
250f1c54846SDan Streetman 
251ae3d89a7SDan Streetman /* init completed, but couldn't create the initial pool */
252ae3d89a7SDan Streetman static bool zswap_has_pool;
253ae3d89a7SDan Streetman 
254f1c54846SDan Streetman /*********************************
255f1c54846SDan Streetman * helpers and fwd declarations
256f1c54846SDan Streetman **********************************/
257f1c54846SDan Streetman 
25844c7c734SChengming Zhou static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
25944c7c734SChengming Zhou {
26044c7c734SChengming Zhou 	return &zswap_trees[swp_type(swp)][swp_offset(swp)
26144c7c734SChengming Zhou 		>> SWAP_ADDRESS_SPACE_SHIFT];
26244c7c734SChengming Zhou }
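
/*
 * Editorial note (illustrative, not in the original source): each swap
 * type has an array of trees, and an offset selects a tree by its high
 * bits.  Assuming SWAP_ADDRESS_SPACE_SHIFT is 14 (16384 pages, i.e.
 * 64 MiB of swap space per tree), offsets 0..16383 map to tree 0,
 * 16384..32767 to tree 1, and so on, spreading tree lock contention
 * across large swap devices.
 */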
26344c7c734SChengming Zhou 
264f1c54846SDan Streetman #define zswap_pool_debug(msg, p)				\
265f1c54846SDan Streetman 	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
266b8cf32dcSYosry Ahmed 		 zpool_get_type((p)->zpools[0]))
267f1c54846SDan Streetman 
268f1c54846SDan Streetman static bool zswap_is_full(void)
269f1c54846SDan Streetman {
270ca79b0c2SArun KS 	return totalram_pages() * zswap_max_pool_percent / 100 <
271f1c54846SDan Streetman 			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
272f1c54846SDan Streetman }
273f1c54846SDan Streetman 
27445190f01SVitaly Wool static bool zswap_can_accept(void)
27545190f01SVitaly Wool {
27645190f01SVitaly Wool 	return totalram_pages() * zswap_accept_thr_percent / 100 *
27745190f01SVitaly Wool 				zswap_max_pool_percent / 100 >
27845190f01SVitaly Wool 			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
27945190f01SVitaly Wool }
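
/*
 * Worked example (editorial, numbers assumed): on a machine with 4 GiB
 * of RAM (1048576 4-KiB pages) and the default max_pool_percent of 20,
 * zswap_is_full() reports full once the compressed pool exceeds 209715
 * pages (~819 MiB).  With the default accept_threshold_percent of 90,
 * zswap_can_accept() starts admitting new pages again only after the
 * pool drops below roughly 188743 pages (~737 MiB).
 */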
28045190f01SVitaly Wool 
281b5ba474fSNhat Pham static u64 get_zswap_pool_size(struct zswap_pool *pool)
282b5ba474fSNhat Pham {
283b5ba474fSNhat Pham 	u64 pool_size = 0;
284b5ba474fSNhat Pham 	int i;
285b5ba474fSNhat Pham 
286b5ba474fSNhat Pham 	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
287b5ba474fSNhat Pham 		pool_size += zpool_get_total_size(pool->zpools[i]);
288b5ba474fSNhat Pham 
289b5ba474fSNhat Pham 	return pool_size;
290b5ba474fSNhat Pham }
291b5ba474fSNhat Pham 
292f1c54846SDan Streetman static void zswap_update_total_size(void)
293f1c54846SDan Streetman {
294f1c54846SDan Streetman 	struct zswap_pool *pool;
295f1c54846SDan Streetman 	u64 total = 0;
296f1c54846SDan Streetman 
297f1c54846SDan Streetman 	rcu_read_lock();
298f1c54846SDan Streetman 
299f1c54846SDan Streetman 	list_for_each_entry_rcu(pool, &zswap_pools, list)
300b5ba474fSNhat Pham 		total += get_zswap_pool_size(pool);
301f1c54846SDan Streetman 
302f1c54846SDan Streetman 	rcu_read_unlock();
303f1c54846SDan Streetman 
304f1c54846SDan Streetman 	zswap_pool_total_size = total;
305f1c54846SDan Streetman }
306f1c54846SDan Streetman 
307a984649bSJohannes Weiner /*********************************
308a984649bSJohannes Weiner * pool functions
309a984649bSJohannes Weiner **********************************/
31094ace3feSChengming Zhou static void __zswap_pool_empty(struct percpu_ref *ref);
311a984649bSJohannes Weiner 
312a984649bSJohannes Weiner static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
313a984649bSJohannes Weiner {
314a984649bSJohannes Weiner 	int i;
315a984649bSJohannes Weiner 	struct zswap_pool *pool;
316a984649bSJohannes Weiner 	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
317a984649bSJohannes Weiner 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
318a984649bSJohannes Weiner 	int ret;
319a984649bSJohannes Weiner 
320a984649bSJohannes Weiner 	if (!zswap_has_pool) {
321a984649bSJohannes Weiner 		/* if either is unset, pool initialization failed, and we
322a984649bSJohannes Weiner 		 * need both params to be set correctly before trying to
323a984649bSJohannes Weiner 		 * create a pool.
324a984649bSJohannes Weiner 		 */
325a984649bSJohannes Weiner 		if (!strcmp(type, ZSWAP_PARAM_UNSET))
326a984649bSJohannes Weiner 			return NULL;
327a984649bSJohannes Weiner 		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
328a984649bSJohannes Weiner 			return NULL;
329a984649bSJohannes Weiner 	}
330a984649bSJohannes Weiner 
331a984649bSJohannes Weiner 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
332a984649bSJohannes Weiner 	if (!pool)
333a984649bSJohannes Weiner 		return NULL;
334a984649bSJohannes Weiner 
335a984649bSJohannes Weiner 	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
336a984649bSJohannes Weiner 		/* unique name for each pool specifically required by zsmalloc */
337a984649bSJohannes Weiner 		snprintf(name, 38, "zswap%x",
338a984649bSJohannes Weiner 			 atomic_inc_return(&zswap_pools_count));
339a984649bSJohannes Weiner 
340a984649bSJohannes Weiner 		pool->zpools[i] = zpool_create_pool(type, name, gfp);
341a984649bSJohannes Weiner 		if (!pool->zpools[i]) {
342a984649bSJohannes Weiner 			pr_err("%s zpool not available\n", type);
343a984649bSJohannes Weiner 			goto error;
344a984649bSJohannes Weiner 		}
345a984649bSJohannes Weiner 	}
346a984649bSJohannes Weiner 	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
347a984649bSJohannes Weiner 
348a984649bSJohannes Weiner 	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
349a984649bSJohannes Weiner 
350a984649bSJohannes Weiner 	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
351a984649bSJohannes Weiner 	if (!pool->acomp_ctx) {
352a984649bSJohannes Weiner 		pr_err("percpu alloc failed\n");
353a984649bSJohannes Weiner 		goto error;
354a984649bSJohannes Weiner 	}
355a984649bSJohannes Weiner 
356a984649bSJohannes Weiner 	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
357a984649bSJohannes Weiner 				       &pool->node);
358a984649bSJohannes Weiner 	if (ret)
359a984649bSJohannes Weiner 		goto error;
360a984649bSJohannes Weiner 
361a984649bSJohannes Weiner 	/* being the current pool takes 1 ref; this func expects the
362a984649bSJohannes Weiner 	 * caller to always add the new pool as the current pool
363a984649bSJohannes Weiner 	 */
36494ace3feSChengming Zhou 	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
36594ace3feSChengming Zhou 			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
36694ace3feSChengming Zhou 	if (ret)
36794ace3feSChengming Zhou 		goto ref_fail;
368a984649bSJohannes Weiner 	INIT_LIST_HEAD(&pool->list);
369a984649bSJohannes Weiner 
370a984649bSJohannes Weiner 	zswap_pool_debug("created", pool);
371a984649bSJohannes Weiner 
372a984649bSJohannes Weiner 	return pool;
373a984649bSJohannes Weiner 
37494ace3feSChengming Zhou ref_fail:
37594ace3feSChengming Zhou 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
376a984649bSJohannes Weiner error:
377a984649bSJohannes Weiner 	if (pool->acomp_ctx)
378a984649bSJohannes Weiner 		free_percpu(pool->acomp_ctx);
379a984649bSJohannes Weiner 	while (i--)
380a984649bSJohannes Weiner 		zpool_destroy_pool(pool->zpools[i]);
381a984649bSJohannes Weiner 	kfree(pool);
382a984649bSJohannes Weiner 	return NULL;
383a984649bSJohannes Weiner }
384a984649bSJohannes Weiner 
385a984649bSJohannes Weiner static struct zswap_pool *__zswap_pool_create_fallback(void)
386a984649bSJohannes Weiner {
387a984649bSJohannes Weiner 	bool has_comp, has_zpool;
388a984649bSJohannes Weiner 
389a984649bSJohannes Weiner 	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
390a984649bSJohannes Weiner 	if (!has_comp && strcmp(zswap_compressor,
391a984649bSJohannes Weiner 				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
392a984649bSJohannes Weiner 		pr_err("compressor %s not available, using default %s\n",
393a984649bSJohannes Weiner 		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
394a984649bSJohannes Weiner 		param_free_charp(&zswap_compressor);
395a984649bSJohannes Weiner 		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
396a984649bSJohannes Weiner 		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
397a984649bSJohannes Weiner 	}
398a984649bSJohannes Weiner 	if (!has_comp) {
399a984649bSJohannes Weiner 		pr_err("default compressor %s not available\n",
400a984649bSJohannes Weiner 		       zswap_compressor);
401a984649bSJohannes Weiner 		param_free_charp(&zswap_compressor);
402a984649bSJohannes Weiner 		zswap_compressor = ZSWAP_PARAM_UNSET;
403a984649bSJohannes Weiner 	}
404a984649bSJohannes Weiner 
405a984649bSJohannes Weiner 	has_zpool = zpool_has_pool(zswap_zpool_type);
406a984649bSJohannes Weiner 	if (!has_zpool && strcmp(zswap_zpool_type,
407a984649bSJohannes Weiner 				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
408a984649bSJohannes Weiner 		pr_err("zpool %s not available, using default %s\n",
409a984649bSJohannes Weiner 		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
410a984649bSJohannes Weiner 		param_free_charp(&zswap_zpool_type);
411a984649bSJohannes Weiner 		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
412a984649bSJohannes Weiner 		has_zpool = zpool_has_pool(zswap_zpool_type);
413a984649bSJohannes Weiner 	}
414a984649bSJohannes Weiner 	if (!has_zpool) {
415a984649bSJohannes Weiner 		pr_err("default zpool %s not available\n",
416a984649bSJohannes Weiner 		       zswap_zpool_type);
417a984649bSJohannes Weiner 		param_free_charp(&zswap_zpool_type);
418a984649bSJohannes Weiner 		zswap_zpool_type = ZSWAP_PARAM_UNSET;
419a984649bSJohannes Weiner 	}
420a984649bSJohannes Weiner 
421a984649bSJohannes Weiner 	if (!has_comp || !has_zpool)
422a984649bSJohannes Weiner 		return NULL;
423a984649bSJohannes Weiner 
424a984649bSJohannes Weiner 	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
425a984649bSJohannes Weiner }
426a984649bSJohannes Weiner 
427a984649bSJohannes Weiner static void zswap_pool_destroy(struct zswap_pool *pool)
428a984649bSJohannes Weiner {
429a984649bSJohannes Weiner 	int i;
430a984649bSJohannes Weiner 
431a984649bSJohannes Weiner 	zswap_pool_debug("destroying", pool);
432a984649bSJohannes Weiner 
433a984649bSJohannes Weiner 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
434a984649bSJohannes Weiner 	free_percpu(pool->acomp_ctx);
435a984649bSJohannes Weiner 
436a984649bSJohannes Weiner 	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
437a984649bSJohannes Weiner 		zpool_destroy_pool(pool->zpools[i]);
438a984649bSJohannes Weiner 	kfree(pool);
439a984649bSJohannes Weiner }
440a984649bSJohannes Weiner 
44139f3ec8eSJohannes Weiner static void __zswap_pool_release(struct work_struct *work)
44239f3ec8eSJohannes Weiner {
44339f3ec8eSJohannes Weiner 	struct zswap_pool *pool = container_of(work, typeof(*pool),
44439f3ec8eSJohannes Weiner 						release_work);
44539f3ec8eSJohannes Weiner 
44639f3ec8eSJohannes Weiner 	synchronize_rcu();
44739f3ec8eSJohannes Weiner 
44894ace3feSChengming Zhou 	/* nobody should have been able to get a ref... */
44994ace3feSChengming Zhou 	WARN_ON(!percpu_ref_is_zero(&pool->ref));
45094ace3feSChengming Zhou 	percpu_ref_exit(&pool->ref);
45139f3ec8eSJohannes Weiner 
45239f3ec8eSJohannes Weiner 	/* pool is now off zswap_pools list and has no references. */
45339f3ec8eSJohannes Weiner 	zswap_pool_destroy(pool);
45439f3ec8eSJohannes Weiner }
45539f3ec8eSJohannes Weiner 
45639f3ec8eSJohannes Weiner static struct zswap_pool *zswap_pool_current(void);
45739f3ec8eSJohannes Weiner 
45894ace3feSChengming Zhou static void __zswap_pool_empty(struct percpu_ref *ref)
45939f3ec8eSJohannes Weiner {
46039f3ec8eSJohannes Weiner 	struct zswap_pool *pool;
46139f3ec8eSJohannes Weiner 
46294ace3feSChengming Zhou 	pool = container_of(ref, typeof(*pool), ref);
46339f3ec8eSJohannes Weiner 
46494ace3feSChengming Zhou 	spin_lock_bh(&zswap_pools_lock);
46539f3ec8eSJohannes Weiner 
46639f3ec8eSJohannes Weiner 	WARN_ON(pool == zswap_pool_current());
46739f3ec8eSJohannes Weiner 
46839f3ec8eSJohannes Weiner 	list_del_rcu(&pool->list);
46939f3ec8eSJohannes Weiner 
47039f3ec8eSJohannes Weiner 	INIT_WORK(&pool->release_work, __zswap_pool_release);
47139f3ec8eSJohannes Weiner 	schedule_work(&pool->release_work);
47239f3ec8eSJohannes Weiner 
47394ace3feSChengming Zhou 	spin_unlock_bh(&zswap_pools_lock);
47439f3ec8eSJohannes Weiner }
47539f3ec8eSJohannes Weiner 
47639f3ec8eSJohannes Weiner static int __must_check zswap_pool_get(struct zswap_pool *pool)
47739f3ec8eSJohannes Weiner {
47839f3ec8eSJohannes Weiner 	if (!pool)
47939f3ec8eSJohannes Weiner 		return 0;
48039f3ec8eSJohannes Weiner 
48194ace3feSChengming Zhou 	return percpu_ref_tryget(&pool->ref);
48239f3ec8eSJohannes Weiner }
48339f3ec8eSJohannes Weiner 
48439f3ec8eSJohannes Weiner static void zswap_pool_put(struct zswap_pool *pool)
48539f3ec8eSJohannes Weiner {
48694ace3feSChengming Zhou 	percpu_ref_put(&pool->ref);
48739f3ec8eSJohannes Weiner }
48839f3ec8eSJohannes Weiner 
489c1a0ecb8SJohannes Weiner static struct zswap_pool *__zswap_pool_current(void)
490c1a0ecb8SJohannes Weiner {
491c1a0ecb8SJohannes Weiner 	struct zswap_pool *pool;
492c1a0ecb8SJohannes Weiner 
493c1a0ecb8SJohannes Weiner 	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
494c1a0ecb8SJohannes Weiner 	WARN_ONCE(!pool && zswap_has_pool,
495c1a0ecb8SJohannes Weiner 		  "%s: no page storage pool!\n", __func__);
496c1a0ecb8SJohannes Weiner 
497c1a0ecb8SJohannes Weiner 	return pool;
498c1a0ecb8SJohannes Weiner }
499c1a0ecb8SJohannes Weiner 
500c1a0ecb8SJohannes Weiner static struct zswap_pool *zswap_pool_current(void)
501c1a0ecb8SJohannes Weiner {
502c1a0ecb8SJohannes Weiner 	assert_spin_locked(&zswap_pools_lock);
503c1a0ecb8SJohannes Weiner 
504c1a0ecb8SJohannes Weiner 	return __zswap_pool_current();
505c1a0ecb8SJohannes Weiner }
506c1a0ecb8SJohannes Weiner 
507c1a0ecb8SJohannes Weiner static struct zswap_pool *zswap_pool_current_get(void)
508c1a0ecb8SJohannes Weiner {
509c1a0ecb8SJohannes Weiner 	struct zswap_pool *pool;
510c1a0ecb8SJohannes Weiner 
511c1a0ecb8SJohannes Weiner 	rcu_read_lock();
512c1a0ecb8SJohannes Weiner 
513c1a0ecb8SJohannes Weiner 	pool = __zswap_pool_current();
514c1a0ecb8SJohannes Weiner 	if (!zswap_pool_get(pool))
515c1a0ecb8SJohannes Weiner 		pool = NULL;
516c1a0ecb8SJohannes Weiner 
517c1a0ecb8SJohannes Weiner 	rcu_read_unlock();
518c1a0ecb8SJohannes Weiner 
519c1a0ecb8SJohannes Weiner 	return pool;
520c1a0ecb8SJohannes Weiner }
521c1a0ecb8SJohannes Weiner 
522c1a0ecb8SJohannes Weiner /* type and compressor must be null-terminated */
523c1a0ecb8SJohannes Weiner static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
524c1a0ecb8SJohannes Weiner {
525c1a0ecb8SJohannes Weiner 	struct zswap_pool *pool;
526c1a0ecb8SJohannes Weiner 
527c1a0ecb8SJohannes Weiner 	assert_spin_locked(&zswap_pools_lock);
528c1a0ecb8SJohannes Weiner 
529c1a0ecb8SJohannes Weiner 	list_for_each_entry_rcu(pool, &zswap_pools, list) {
530c1a0ecb8SJohannes Weiner 		if (strcmp(pool->tfm_name, compressor))
531c1a0ecb8SJohannes Weiner 			continue;
532c1a0ecb8SJohannes Weiner 		/* all zpools share the same type */
533c1a0ecb8SJohannes Weiner 		if (strcmp(zpool_get_type(pool->zpools[0]), type))
534c1a0ecb8SJohannes Weiner 			continue;
535c1a0ecb8SJohannes Weiner 		/* if we can't get it, it's about to be destroyed */
536c1a0ecb8SJohannes Weiner 		if (!zswap_pool_get(pool))
537c1a0ecb8SJohannes Weiner 			continue;
538c1a0ecb8SJohannes Weiner 		return pool;
539c1a0ecb8SJohannes Weiner 	}
540c1a0ecb8SJohannes Weiner 
541c1a0ecb8SJohannes Weiner 	return NULL;
542c1a0ecb8SJohannes Weiner }
543c1a0ecb8SJohannes Weiner 
544abca07c0SJohannes Weiner /*********************************
545abca07c0SJohannes Weiner * param callbacks
546abca07c0SJohannes Weiner **********************************/
547abca07c0SJohannes Weiner 
548abca07c0SJohannes Weiner static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
549abca07c0SJohannes Weiner {
550abca07c0SJohannes Weiner 	/* no change required */
551abca07c0SJohannes Weiner 	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
552abca07c0SJohannes Weiner 		return false;
553abca07c0SJohannes Weiner 	return true;
554abca07c0SJohannes Weiner }
555abca07c0SJohannes Weiner 
556abca07c0SJohannes Weiner /* val must be a null-terminated string */
557abca07c0SJohannes Weiner static int __zswap_param_set(const char *val, const struct kernel_param *kp,
558abca07c0SJohannes Weiner 			     char *type, char *compressor)
559abca07c0SJohannes Weiner {
560abca07c0SJohannes Weiner 	struct zswap_pool *pool, *put_pool = NULL;
561abca07c0SJohannes Weiner 	char *s = strstrip((char *)val);
562abca07c0SJohannes Weiner 	int ret = 0;
563abca07c0SJohannes Weiner 	bool new_pool = false;
564abca07c0SJohannes Weiner 
565abca07c0SJohannes Weiner 	mutex_lock(&zswap_init_lock);
566abca07c0SJohannes Weiner 	switch (zswap_init_state) {
567abca07c0SJohannes Weiner 	case ZSWAP_UNINIT:
568abca07c0SJohannes Weiner 		/* if this is load-time (pre-init) param setting,
569abca07c0SJohannes Weiner 		 * don't create a pool; that's done during init.
570abca07c0SJohannes Weiner 		 */
571abca07c0SJohannes Weiner 		ret = param_set_charp(s, kp);
572abca07c0SJohannes Weiner 		break;
573abca07c0SJohannes Weiner 	case ZSWAP_INIT_SUCCEED:
574abca07c0SJohannes Weiner 		new_pool = zswap_pool_changed(s, kp);
575abca07c0SJohannes Weiner 		break;
576abca07c0SJohannes Weiner 	case ZSWAP_INIT_FAILED:
577abca07c0SJohannes Weiner 		pr_err("can't set param, initialization failed\n");
578abca07c0SJohannes Weiner 		ret = -ENODEV;
579abca07c0SJohannes Weiner 	}
580abca07c0SJohannes Weiner 	mutex_unlock(&zswap_init_lock);
581abca07c0SJohannes Weiner 
582abca07c0SJohannes Weiner 	/* no need to create a new pool, return directly */
583abca07c0SJohannes Weiner 	if (!new_pool)
584abca07c0SJohannes Weiner 		return ret;
585abca07c0SJohannes Weiner 
586abca07c0SJohannes Weiner 	if (!type) {
587abca07c0SJohannes Weiner 		if (!zpool_has_pool(s)) {
588abca07c0SJohannes Weiner 			pr_err("zpool %s not available\n", s);
589abca07c0SJohannes Weiner 			return -ENOENT;
590abca07c0SJohannes Weiner 		}
591abca07c0SJohannes Weiner 		type = s;
592abca07c0SJohannes Weiner 	} else if (!compressor) {
593abca07c0SJohannes Weiner 		if (!crypto_has_acomp(s, 0, 0)) {
594abca07c0SJohannes Weiner 			pr_err("compressor %s not available\n", s);
595abca07c0SJohannes Weiner 			return -ENOENT;
596abca07c0SJohannes Weiner 		}
597abca07c0SJohannes Weiner 		compressor = s;
598abca07c0SJohannes Weiner 	} else {
599abca07c0SJohannes Weiner 		WARN_ON(1);
600abca07c0SJohannes Weiner 		return -EINVAL;
601abca07c0SJohannes Weiner 	}
602abca07c0SJohannes Weiner 
60394ace3feSChengming Zhou 	spin_lock_bh(&zswap_pools_lock);
604abca07c0SJohannes Weiner 
605abca07c0SJohannes Weiner 	pool = zswap_pool_find_get(type, compressor);
606abca07c0SJohannes Weiner 	if (pool) {
607abca07c0SJohannes Weiner 		zswap_pool_debug("using existing", pool);
608abca07c0SJohannes Weiner 		WARN_ON(pool == zswap_pool_current());
609abca07c0SJohannes Weiner 		list_del_rcu(&pool->list);
610abca07c0SJohannes Weiner 	}
611abca07c0SJohannes Weiner 
61294ace3feSChengming Zhou 	spin_unlock_bh(&zswap_pools_lock);
613abca07c0SJohannes Weiner 
614abca07c0SJohannes Weiner 	if (!pool)
615abca07c0SJohannes Weiner 		pool = zswap_pool_create(type, compressor);
61694ace3feSChengming Zhou 	else {
61794ace3feSChengming Zhou 		/*
61894ace3feSChengming Zhou 		 * Restore the initial ref dropped by percpu_ref_kill()
61994ace3feSChengming Zhou 		 * when the pool was decommissioned and switch it again
62094ace3feSChengming Zhou 		 * to percpu mode.
62194ace3feSChengming Zhou 		 */
62294ace3feSChengming Zhou 		percpu_ref_resurrect(&pool->ref);
62394ace3feSChengming Zhou 
62494ace3feSChengming Zhou 		/* Drop the ref from zswap_pool_find_get(). */
62594ace3feSChengming Zhou 		zswap_pool_put(pool);
62694ace3feSChengming Zhou 	}
627abca07c0SJohannes Weiner 
628abca07c0SJohannes Weiner 	if (pool)
629abca07c0SJohannes Weiner 		ret = param_set_charp(s, kp);
630abca07c0SJohannes Weiner 	else
631abca07c0SJohannes Weiner 		ret = -EINVAL;
632abca07c0SJohannes Weiner 
63394ace3feSChengming Zhou 	spin_lock_bh(&zswap_pools_lock);
634abca07c0SJohannes Weiner 
635abca07c0SJohannes Weiner 	if (!ret) {
636abca07c0SJohannes Weiner 		put_pool = zswap_pool_current();
637abca07c0SJohannes Weiner 		list_add_rcu(&pool->list, &zswap_pools);
638abca07c0SJohannes Weiner 		zswap_has_pool = true;
639abca07c0SJohannes Weiner 	} else if (pool) {
640abca07c0SJohannes Weiner 		/* add the possibly pre-existing pool to the end of the pools
641abca07c0SJohannes Weiner 		 * list; if it's new (and empty) then it'll be removed and
642abca07c0SJohannes Weiner 		 * destroyed by the put after we drop the lock
643abca07c0SJohannes Weiner 		 */
644abca07c0SJohannes Weiner 		list_add_tail_rcu(&pool->list, &zswap_pools);
645abca07c0SJohannes Weiner 		put_pool = pool;
646abca07c0SJohannes Weiner 	}
647abca07c0SJohannes Weiner 
64894ace3feSChengming Zhou 	spin_unlock_bh(&zswap_pools_lock);
649abca07c0SJohannes Weiner 
650abca07c0SJohannes Weiner 	if (!zswap_has_pool && !pool) {
651abca07c0SJohannes Weiner 		/* if initial pool creation failed, and this pool creation also
652abca07c0SJohannes Weiner 		 * failed, maybe both compressor and zpool params were bad.
653abca07c0SJohannes Weiner 		 * Allow changing this param, so pool creation will succeed
654abca07c0SJohannes Weiner 		 * when the other param is changed. We already verified this
655abca07c0SJohannes Weiner 		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
656abca07c0SJohannes Weiner 		 * checks above.
657abca07c0SJohannes Weiner 		 */
658abca07c0SJohannes Weiner 		ret = param_set_charp(s, kp);
659abca07c0SJohannes Weiner 	}
660abca07c0SJohannes Weiner 
661abca07c0SJohannes Weiner 	/* drop the ref from either the old current pool,
662abca07c0SJohannes Weiner 	 * or the new pool we failed to add
663abca07c0SJohannes Weiner 	 */
664abca07c0SJohannes Weiner 	if (put_pool)
66594ace3feSChengming Zhou 		percpu_ref_kill(&put_pool->ref);
666abca07c0SJohannes Weiner 
667abca07c0SJohannes Weiner 	return ret;
668abca07c0SJohannes Weiner }
669abca07c0SJohannes Weiner 
670abca07c0SJohannes Weiner static int zswap_compressor_param_set(const char *val,
671abca07c0SJohannes Weiner 				      const struct kernel_param *kp)
672abca07c0SJohannes Weiner {
673abca07c0SJohannes Weiner 	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
674abca07c0SJohannes Weiner }
675abca07c0SJohannes Weiner 
676abca07c0SJohannes Weiner static int zswap_zpool_param_set(const char *val,
677abca07c0SJohannes Weiner 				 const struct kernel_param *kp)
678abca07c0SJohannes Weiner {
679abca07c0SJohannes Weiner 	return __zswap_param_set(val, kp, NULL, zswap_compressor);
680abca07c0SJohannes Weiner }
681abca07c0SJohannes Weiner 
682abca07c0SJohannes Weiner static int zswap_enabled_param_set(const char *val,
683abca07c0SJohannes Weiner 				   const struct kernel_param *kp)
684abca07c0SJohannes Weiner {
685abca07c0SJohannes Weiner 	int ret = -ENODEV;
686abca07c0SJohannes Weiner 
687abca07c0SJohannes Weiner 	/* if this is load-time (pre-init) param setting, only set param. */
688abca07c0SJohannes Weiner 	if (system_state != SYSTEM_RUNNING)
689abca07c0SJohannes Weiner 		return param_set_bool(val, kp);
690abca07c0SJohannes Weiner 
691abca07c0SJohannes Weiner 	mutex_lock(&zswap_init_lock);
692abca07c0SJohannes Weiner 	switch (zswap_init_state) {
693abca07c0SJohannes Weiner 	case ZSWAP_UNINIT:
694abca07c0SJohannes Weiner 		if (zswap_setup())
695abca07c0SJohannes Weiner 			break;
696abca07c0SJohannes Weiner 		fallthrough;
697abca07c0SJohannes Weiner 	case ZSWAP_INIT_SUCCEED:
698abca07c0SJohannes Weiner 		if (!zswap_has_pool)
699abca07c0SJohannes Weiner 			pr_err("can't enable, no pool configured\n");
700abca07c0SJohannes Weiner 		else
701abca07c0SJohannes Weiner 			ret = param_set_bool(val, kp);
702abca07c0SJohannes Weiner 		break;
703abca07c0SJohannes Weiner 	case ZSWAP_INIT_FAILED:
704abca07c0SJohannes Weiner 		pr_err("can't enable, initialization failed\n");
705abca07c0SJohannes Weiner 	}
706abca07c0SJohannes Weiner 	mutex_unlock(&zswap_init_lock);
707abca07c0SJohannes Weiner 
708abca07c0SJohannes Weiner 	return ret;
709abca07c0SJohannes Weiner }
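
/*
 * Editorial usage sketch (not in the original source): these callbacks
 * back the module parameters under /sys/module/zswap/parameters/, so
 * the compressor, zpool and enabled state can be changed at runtime,
 * e.g.:
 *
 *	echo zstd > /sys/module/zswap/parameters/compressor
 *	echo zsmalloc > /sys/module/zswap/parameters/zpool
 *	echo Y > /sys/module/zswap/parameters/enabled
 *
 * Pages already stored remain in their old pool until they are loaded
 * or written back; only new stores go to the newly selected pool.
 */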
710abca07c0SJohannes Weiner 
711506a86c5SJohannes Weiner /*********************************
712506a86c5SJohannes Weiner * lru functions
713506a86c5SJohannes Weiner **********************************/
714506a86c5SJohannes Weiner 
715a65b0e76SDomenico Cerasuolo /* should be called under RCU */
716a65b0e76SDomenico Cerasuolo #ifdef CONFIG_MEMCG
717a65b0e76SDomenico Cerasuolo static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
718a65b0e76SDomenico Cerasuolo {
719a65b0e76SDomenico Cerasuolo 	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
720a65b0e76SDomenico Cerasuolo }
721a65b0e76SDomenico Cerasuolo #else
722a65b0e76SDomenico Cerasuolo static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
723a65b0e76SDomenico Cerasuolo {
724a65b0e76SDomenico Cerasuolo 	return NULL;
725a65b0e76SDomenico Cerasuolo }
726a65b0e76SDomenico Cerasuolo #endif
727a65b0e76SDomenico Cerasuolo 
728a65b0e76SDomenico Cerasuolo static inline int entry_to_nid(struct zswap_entry *entry)
729a65b0e76SDomenico Cerasuolo {
730a65b0e76SDomenico Cerasuolo 	return page_to_nid(virt_to_page(entry));
731a65b0e76SDomenico Cerasuolo }
732a65b0e76SDomenico Cerasuolo 
733a65b0e76SDomenico Cerasuolo static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
734a65b0e76SDomenico Cerasuolo {
735b5ba474fSNhat Pham 	atomic_long_t *nr_zswap_protected;
736b5ba474fSNhat Pham 	unsigned long lru_size, old, new;
737a65b0e76SDomenico Cerasuolo 	int nid = entry_to_nid(entry);
738a65b0e76SDomenico Cerasuolo 	struct mem_cgroup *memcg;
739b5ba474fSNhat Pham 	struct lruvec *lruvec;
740a65b0e76SDomenico Cerasuolo 
741a65b0e76SDomenico Cerasuolo 	/*
742a65b0e76SDomenico Cerasuolo 	 * Note that it is safe to use rcu_read_lock() here, even in the face of
743a65b0e76SDomenico Cerasuolo 	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
744a65b0e76SDomenico Cerasuolo 	 * used in list_lru lookup, only two scenarios are possible:
745a65b0e76SDomenico Cerasuolo 	 *
746a65b0e76SDomenico Cerasuolo 	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
747a65b0e76SDomenico Cerasuolo 	 *    new entry will be reparented to memcg's parent's list_lru.
748a65b0e76SDomenico Cerasuolo 	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
749a65b0e76SDomenico Cerasuolo 	 *    new entry will be added directly to memcg's parent's list_lru.
750a65b0e76SDomenico Cerasuolo 	 *
7513f798aa6SChengming Zhou 	 * Similar reasoning holds for list_lru_del().
752a65b0e76SDomenico Cerasuolo 	 */
753a65b0e76SDomenico Cerasuolo 	rcu_read_lock();
754a65b0e76SDomenico Cerasuolo 	memcg = mem_cgroup_from_entry(entry);
755a65b0e76SDomenico Cerasuolo 	/* will always succeed */
756a65b0e76SDomenico Cerasuolo 	list_lru_add(list_lru, &entry->lru, nid, memcg);
757b5ba474fSNhat Pham 
758b5ba474fSNhat Pham 	/* Update the protection area */
759b5ba474fSNhat Pham 	lru_size = list_lru_count_one(list_lru, nid, memcg);
760b5ba474fSNhat Pham 	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
761b5ba474fSNhat Pham 	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
762b5ba474fSNhat Pham 	old = atomic_long_inc_return(nr_zswap_protected);
763b5ba474fSNhat Pham 	/*
764b5ba474fSNhat Pham 	 * Decay to avoid overflow and adapt to changing workloads.
765b5ba474fSNhat Pham 	 * This is based on LRU reclaim cost decaying heuristics.
766b5ba474fSNhat Pham 	 */
767b5ba474fSNhat Pham 	do {
768b5ba474fSNhat Pham 		new = old > lru_size / 4 ? old / 2 : old;
769b5ba474fSNhat Pham 	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
770a65b0e76SDomenico Cerasuolo 	rcu_read_unlock();
771a65b0e76SDomenico Cerasuolo }
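
/*
 * Editorial note (not in the original source): nr_zswap_protected grows
 * by one for each entry added to the LRU and is halved whenever it
 * exceeds a quarter of the LRU length, so it acts as a decayed count of
 * recently stored entries that the shrinker can treat as protected from
 * writeback.
 */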
772a65b0e76SDomenico Cerasuolo 
773a65b0e76SDomenico Cerasuolo static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
774a65b0e76SDomenico Cerasuolo {
775a65b0e76SDomenico Cerasuolo 	int nid = entry_to_nid(entry);
776a65b0e76SDomenico Cerasuolo 	struct mem_cgroup *memcg;
777a65b0e76SDomenico Cerasuolo 
778a65b0e76SDomenico Cerasuolo 	rcu_read_lock();
779a65b0e76SDomenico Cerasuolo 	memcg = mem_cgroup_from_entry(entry);
780a65b0e76SDomenico Cerasuolo 	/* will always succeed */
781a65b0e76SDomenico Cerasuolo 	list_lru_del(list_lru, &entry->lru, nid, memcg);
782a65b0e76SDomenico Cerasuolo 	rcu_read_unlock();
783a65b0e76SDomenico Cerasuolo }
784a65b0e76SDomenico Cerasuolo 
7855182661aSJohannes Weiner void zswap_lruvec_state_init(struct lruvec *lruvec)
7865182661aSJohannes Weiner {
7875182661aSJohannes Weiner 	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
7885182661aSJohannes Weiner }
7895182661aSJohannes Weiner 
7905182661aSJohannes Weiner void zswap_folio_swapin(struct folio *folio)
7915182661aSJohannes Weiner {
7925182661aSJohannes Weiner 	struct lruvec *lruvec;
7935182661aSJohannes Weiner 
7945182661aSJohannes Weiner 	if (folio) {
7955182661aSJohannes Weiner 		lruvec = folio_lruvec(folio);
7965182661aSJohannes Weiner 		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
7975182661aSJohannes Weiner 	}
7985182661aSJohannes Weiner }
7995182661aSJohannes Weiner 
8005182661aSJohannes Weiner void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
8015182661aSJohannes Weiner {
802bf9b7df2SChengming Zhou 	/* lock out zswap shrinker walking memcg tree */
803e35606e4SChengming Zhou 	spin_lock(&zswap_shrink_lock);
804e35606e4SChengming Zhou 	if (zswap_next_shrink == memcg)
805e35606e4SChengming Zhou 		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
806e35606e4SChengming Zhou 	spin_unlock(&zswap_shrink_lock);
8075182661aSJohannes Weiner }
8085182661aSJohannes Weiner 
8095182661aSJohannes Weiner /*********************************
8102b281117SSeth Jennings * rbtree functions
8112b281117SSeth Jennings **********************************/
8122b281117SSeth Jennings static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
8132b281117SSeth Jennings {
8142b281117SSeth Jennings 	struct rb_node *node = root->rb_node;
8152b281117SSeth Jennings 	struct zswap_entry *entry;
8160bb48849SDomenico Cerasuolo 	pgoff_t entry_offset;
8172b281117SSeth Jennings 
8182b281117SSeth Jennings 	while (node) {
8192b281117SSeth Jennings 		entry = rb_entry(node, struct zswap_entry, rbnode);
8200bb48849SDomenico Cerasuolo 		entry_offset = swp_offset(entry->swpentry);
8210bb48849SDomenico Cerasuolo 		if (entry_offset > offset)
8222b281117SSeth Jennings 			node = node->rb_left;
8230bb48849SDomenico Cerasuolo 		else if (entry_offset < offset)
8242b281117SSeth Jennings 			node = node->rb_right;
8252b281117SSeth Jennings 		else
8262b281117SSeth Jennings 			return entry;
8272b281117SSeth Jennings 	}
8282b281117SSeth Jennings 	return NULL;
8292b281117SSeth Jennings }
8302b281117SSeth Jennings 
8312b281117SSeth Jennings /*
8322b281117SSeth Jennings  * In the case that an entry with the same offset is found, a pointer to
8332b281117SSeth Jennings  * the existing entry is stored in dupentry and the function returns -EEXIST
8342b281117SSeth Jennings  */
8352b281117SSeth Jennings static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
8362b281117SSeth Jennings 			struct zswap_entry **dupentry)
8372b281117SSeth Jennings {
8382b281117SSeth Jennings 	struct rb_node **link = &root->rb_node, *parent = NULL;
8392b281117SSeth Jennings 	struct zswap_entry *myentry;
8400bb48849SDomenico Cerasuolo 	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
8412b281117SSeth Jennings 
8422b281117SSeth Jennings 	while (*link) {
8432b281117SSeth Jennings 		parent = *link;
8442b281117SSeth Jennings 		myentry = rb_entry(parent, struct zswap_entry, rbnode);
8450bb48849SDomenico Cerasuolo 		myentry_offset = swp_offset(myentry->swpentry);
8460bb48849SDomenico Cerasuolo 		if (myentry_offset > entry_offset)
8472b281117SSeth Jennings 			link = &(*link)->rb_left;
8480bb48849SDomenico Cerasuolo 		else if (myentry_offset < entry_offset)
8492b281117SSeth Jennings 			link = &(*link)->rb_right;
8502b281117SSeth Jennings 		else {
8512b281117SSeth Jennings 			*dupentry = myentry;
8522b281117SSeth Jennings 			return -EEXIST;
8532b281117SSeth Jennings 		}
8542b281117SSeth Jennings 	}
8552b281117SSeth Jennings 	rb_link_node(&entry->rbnode, parent, link);
8562b281117SSeth Jennings 	rb_insert_color(&entry->rbnode, root);
8572b281117SSeth Jennings 	return 0;
8582b281117SSeth Jennings }
8592b281117SSeth Jennings 
860a230c20eSChengming Zhou static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
8610ab0abcfSWeijie Yang {
8620ab0abcfSWeijie Yang 	rb_erase(&entry->rbnode, root);
8630ab0abcfSWeijie Yang 	RB_CLEAR_NODE(&entry->rbnode);
8640ab0abcfSWeijie Yang }
8650ab0abcfSWeijie Yang 
86636034bf6SJohannes Weiner /*********************************
86736034bf6SJohannes Weiner * zswap entry functions
86836034bf6SJohannes Weiner **********************************/
86936034bf6SJohannes Weiner static struct kmem_cache *zswap_entry_cache;
87036034bf6SJohannes Weiner 
87136034bf6SJohannes Weiner static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
87236034bf6SJohannes Weiner {
87336034bf6SJohannes Weiner 	struct zswap_entry *entry;
87436034bf6SJohannes Weiner 	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
87536034bf6SJohannes Weiner 	if (!entry)
87636034bf6SJohannes Weiner 		return NULL;
87736034bf6SJohannes Weiner 	RB_CLEAR_NODE(&entry->rbnode);
87836034bf6SJohannes Weiner 	return entry;
87936034bf6SJohannes Weiner }
88036034bf6SJohannes Weiner 
88136034bf6SJohannes Weiner static void zswap_entry_cache_free(struct zswap_entry *entry)
88236034bf6SJohannes Weiner {
88336034bf6SJohannes Weiner 	kmem_cache_free(zswap_entry_cache, entry);
88436034bf6SJohannes Weiner }
88536034bf6SJohannes Weiner 
886b8cf32dcSYosry Ahmed static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
887b8cf32dcSYosry Ahmed {
888b8cf32dcSYosry Ahmed 	int i = 0;
889b8cf32dcSYosry Ahmed 
890b8cf32dcSYosry Ahmed 	if (ZSWAP_NR_ZPOOLS > 1)
891b8cf32dcSYosry Ahmed 		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
892b8cf32dcSYosry Ahmed 
893b8cf32dcSYosry Ahmed 	return entry->pool->zpools[i];
894b8cf32dcSYosry Ahmed }
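
/*
 * Editorial note (not in the original source): the zpool index is
 * derived from hash_ptr() on the entry pointer, which is stable for the
 * entry's lifetime, so loads and frees find the same zpool again; the
 * multiple zpools exist only to spread allocator lock contention across
 * ZSWAP_NR_ZPOOLS instances.
 */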
895b8cf32dcSYosry Ahmed 
8960ab0abcfSWeijie Yang /*
89712d79d64SDan Streetman  * Carries out the common pattern of freeing an entry's zpool allocation,
8980ab0abcfSWeijie Yang  * freeing the entry itself, and decrementing the number of stored pages.
8990ab0abcfSWeijie Yang  */
90042398be2SJohannes Weiner static void zswap_entry_free(struct zswap_entry *entry)
9010ab0abcfSWeijie Yang {
902a85f878bSSrividya Desireddy 	if (!entry->length)
903a85f878bSSrividya Desireddy 		atomic_dec(&zswap_same_filled_pages);
904a85f878bSSrividya Desireddy 	else {
905e35606e4SChengming Zhou 		zswap_lru_del(&zswap_list_lru, entry);
906b8cf32dcSYosry Ahmed 		zpool_free(zswap_find_zpool(entry), entry->handle);
907e35606e4SChengming Zhou 		atomic_dec(&zswap_nr_stored);
908f1c54846SDan Streetman 		zswap_pool_put(entry->pool);
909a85f878bSSrividya Desireddy 	}
9102e601e1eSJohannes Weiner 	if (entry->objcg) {
9112e601e1eSJohannes Weiner 		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
9122e601e1eSJohannes Weiner 		obj_cgroup_put(entry->objcg);
9132e601e1eSJohannes Weiner 	}
9140ab0abcfSWeijie Yang 	zswap_entry_cache_free(entry);
9150ab0abcfSWeijie Yang 	atomic_dec(&zswap_stored_pages);
916f1c54846SDan Streetman 	zswap_update_total_size();
9170ab0abcfSWeijie Yang }
9180ab0abcfSWeijie Yang 
9197dd1f7f0SJohannes Weiner /*
920a230c20eSChengming Zhou  * The caller holds the tree lock and has looked up the entry in the tree,
921a230c20eSChengming Zhou  * so it must be on the tree; remove it from the tree and free it.
9227dd1f7f0SJohannes Weiner  */
9237dd1f7f0SJohannes Weiner static void zswap_invalidate_entry(struct zswap_tree *tree,
9247dd1f7f0SJohannes Weiner 				   struct zswap_entry *entry)
9257dd1f7f0SJohannes Weiner {
926a230c20eSChengming Zhou 	zswap_rb_erase(&tree->rbroot, entry);
927a230c20eSChengming Zhou 	zswap_entry_free(entry);
9287dd1f7f0SJohannes Weiner }
9297dd1f7f0SJohannes Weiner 
9302b281117SSeth Jennings /*********************************
931f91e81d3SJohannes Weiner * compressed storage functions
932f91e81d3SJohannes Weiner **********************************/
93364f200b8SJohannes Weiner static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
93464f200b8SJohannes Weiner {
93564f200b8SJohannes Weiner 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
93664f200b8SJohannes Weiner 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
93764f200b8SJohannes Weiner 	struct crypto_acomp *acomp;
93864f200b8SJohannes Weiner 	struct acomp_req *req;
93964f200b8SJohannes Weiner 	int ret;
94064f200b8SJohannes Weiner 
94164f200b8SJohannes Weiner 	mutex_init(&acomp_ctx->mutex);
94264f200b8SJohannes Weiner 
94364f200b8SJohannes Weiner 	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
94464f200b8SJohannes Weiner 	if (!acomp_ctx->buffer)
94564f200b8SJohannes Weiner 		return -ENOMEM;
94664f200b8SJohannes Weiner 
94764f200b8SJohannes Weiner 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
94864f200b8SJohannes Weiner 	if (IS_ERR(acomp)) {
94964f200b8SJohannes Weiner 		pr_err("could not alloc crypto acomp %s : %ld\n",
95064f200b8SJohannes Weiner 				pool->tfm_name, PTR_ERR(acomp));
95164f200b8SJohannes Weiner 		ret = PTR_ERR(acomp);
95264f200b8SJohannes Weiner 		goto acomp_fail;
95364f200b8SJohannes Weiner 	}
95464f200b8SJohannes Weiner 	acomp_ctx->acomp = acomp;
955270700ddSBarry Song 	acomp_ctx->is_sleepable = acomp_is_async(acomp);
95664f200b8SJohannes Weiner 
95764f200b8SJohannes Weiner 	req = acomp_request_alloc(acomp_ctx->acomp);
95864f200b8SJohannes Weiner 	if (!req) {
95964f200b8SJohannes Weiner 		pr_err("could not alloc crypto acomp_request %s\n",
96064f200b8SJohannes Weiner 		       pool->tfm_name);
96164f200b8SJohannes Weiner 		ret = -ENOMEM;
96264f200b8SJohannes Weiner 		goto req_fail;
96364f200b8SJohannes Weiner 	}
96464f200b8SJohannes Weiner 	acomp_ctx->req = req;
96564f200b8SJohannes Weiner 
96664f200b8SJohannes Weiner 	crypto_init_wait(&acomp_ctx->wait);
96764f200b8SJohannes Weiner 	/*
96864f200b8SJohannes Weiner 	 * if the backend of acomp is async zip, crypto_req_done() will wake up
96964f200b8SJohannes Weiner 	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
97064f200b8SJohannes Weiner 	 * won't be called, and crypto_wait_req() will return without blocking.
97164f200b8SJohannes Weiner 	 */
97264f200b8SJohannes Weiner 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
97364f200b8SJohannes Weiner 				   crypto_req_done, &acomp_ctx->wait);
97464f200b8SJohannes Weiner 
97564f200b8SJohannes Weiner 	return 0;
97664f200b8SJohannes Weiner 
97764f200b8SJohannes Weiner req_fail:
97864f200b8SJohannes Weiner 	crypto_free_acomp(acomp_ctx->acomp);
97964f200b8SJohannes Weiner acomp_fail:
98064f200b8SJohannes Weiner 	kfree(acomp_ctx->buffer);
98164f200b8SJohannes Weiner 	return ret;
98264f200b8SJohannes Weiner }
98364f200b8SJohannes Weiner 
98464f200b8SJohannes Weiner static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
98564f200b8SJohannes Weiner {
98664f200b8SJohannes Weiner 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
98764f200b8SJohannes Weiner 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
98864f200b8SJohannes Weiner 
98964f200b8SJohannes Weiner 	if (!IS_ERR_OR_NULL(acomp_ctx)) {
99064f200b8SJohannes Weiner 		if (!IS_ERR_OR_NULL(acomp_ctx->req))
99164f200b8SJohannes Weiner 			acomp_request_free(acomp_ctx->req);
99264f200b8SJohannes Weiner 		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
99364f200b8SJohannes Weiner 			crypto_free_acomp(acomp_ctx->acomp);
99464f200b8SJohannes Weiner 		kfree(acomp_ctx->buffer);
99564f200b8SJohannes Weiner 	}
99664f200b8SJohannes Weiner 
99764f200b8SJohannes Weiner 	return 0;
99864f200b8SJohannes Weiner }
99964f200b8SJohannes Weiner 
1000f91e81d3SJohannes Weiner static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
1001f91e81d3SJohannes Weiner {
1002f91e81d3SJohannes Weiner 	struct crypto_acomp_ctx *acomp_ctx;
1003f91e81d3SJohannes Weiner 	struct scatterlist input, output;
100455e78c93SBarry Song 	int comp_ret = 0, alloc_ret = 0;
1005f91e81d3SJohannes Weiner 	unsigned int dlen = PAGE_SIZE;
1006f91e81d3SJohannes Weiner 	unsigned long handle;
1007f91e81d3SJohannes Weiner 	struct zpool *zpool;
1008f91e81d3SJohannes Weiner 	char *buf;
1009f91e81d3SJohannes Weiner 	gfp_t gfp;
1010f91e81d3SJohannes Weiner 	u8 *dst;
1011f91e81d3SJohannes Weiner 
1012f91e81d3SJohannes Weiner 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1013f91e81d3SJohannes Weiner 
1014f91e81d3SJohannes Weiner 	mutex_lock(&acomp_ctx->mutex);
1015f91e81d3SJohannes Weiner 
1016f91e81d3SJohannes Weiner 	dst = acomp_ctx->buffer;
1017f91e81d3SJohannes Weiner 	sg_init_table(&input, 1);
1018f91e81d3SJohannes Weiner 	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1019f91e81d3SJohannes Weiner 
1020f91e81d3SJohannes Weiner 	/*
1021f91e81d3SJohannes Weiner 	 * We need PAGE_SIZE * 2 here: compression may occasionally expand
1022f91e81d3SJohannes Weiner 	 * the data, and hardware accelerators may not check the dst buffer
1023f91e81d3SJohannes Weiner 	 * size, so give the dst buffer enough room to avoid a buffer overflow.
1024f91e81d3SJohannes Weiner 	 */
1025f91e81d3SJohannes Weiner 	sg_init_one(&output, dst, PAGE_SIZE * 2);
1026f91e81d3SJohannes Weiner 	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1027f91e81d3SJohannes Weiner 
1028f91e81d3SJohannes Weiner 	/*
1029f91e81d3SJohannes Weiner 	 * It may look a little silly that we send an asynchronous request and
1030f91e81d3SJohannes Weiner 	 * then wait for its completion synchronously, which makes the whole
1031f91e81d3SJohannes Weiner 	 * operation synchronous in practice.
1032f91e81d3SJohannes Weiner 	 * In theory, acomp lets users submit multiple requests on one acomp
1033f91e81d3SJohannes Weiner 	 * instance and have them complete concurrently. But zswap stores and
1034f91e81d3SJohannes Weiner 	 * loads page by page, so within a single thread doing zswap there is
1035f91e81d3SJohannes Weiner 	 * no way to submit a second page before the first one is done.
1036f91e81d3SJohannes Weiner 	 * Different threads running on different CPUs use different acomp
1037f91e81d3SJohannes Weiner 	 * instances, however, so multiple threads can still do (de)compression
1038f91e81d3SJohannes Weiner 	 * in parallel.
1039f91e81d3SJohannes Weiner 	 */
104055e78c93SBarry Song 	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1041f91e81d3SJohannes Weiner 	dlen = acomp_ctx->req->dlen;
104255e78c93SBarry Song 	if (comp_ret)
1043f91e81d3SJohannes Weiner 		goto unlock;
1044f91e81d3SJohannes Weiner 
1045f91e81d3SJohannes Weiner 	zpool = zswap_find_zpool(entry);
1046f91e81d3SJohannes Weiner 	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1047f91e81d3SJohannes Weiner 	if (zpool_malloc_support_movable(zpool))
1048f91e81d3SJohannes Weiner 		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
104955e78c93SBarry Song 	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
105055e78c93SBarry Song 	if (alloc_ret)
1051f91e81d3SJohannes Weiner 		goto unlock;
1052f91e81d3SJohannes Weiner 
1053f91e81d3SJohannes Weiner 	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1054f91e81d3SJohannes Weiner 	memcpy(buf, dst, dlen);
1055f91e81d3SJohannes Weiner 	zpool_unmap_handle(zpool, handle);
1056f91e81d3SJohannes Weiner 
1057f91e81d3SJohannes Weiner 	entry->handle = handle;
1058f91e81d3SJohannes Weiner 	entry->length = dlen;
1059f91e81d3SJohannes Weiner 
1060f91e81d3SJohannes Weiner unlock:
106155e78c93SBarry Song 	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
106255e78c93SBarry Song 		zswap_reject_compress_poor++;
106355e78c93SBarry Song 	else if (comp_ret)
106455e78c93SBarry Song 		zswap_reject_compress_fail++;
106555e78c93SBarry Song 	else if (alloc_ret)
106655e78c93SBarry Song 		zswap_reject_alloc_fail++;
106755e78c93SBarry Song 
1068f91e81d3SJohannes Weiner 	mutex_unlock(&acomp_ctx->mutex);
106955e78c93SBarry Song 	return comp_ret == 0 && alloc_ret == 0;
1070f91e81d3SJohannes Weiner }
1071f91e81d3SJohannes Weiner 
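/*
 * Decompress the entry's data from its zpool handle into @page. If the
 * zpool mapping is atomic while the compressor may sleep, or the mapping
 * is not a linearly mapped lowmem address, the compressed data is first
 * copied into the per-CPU scratch buffer.
 */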
1072f91e81d3SJohannes Weiner static void zswap_decompress(struct zswap_entry *entry, struct page *page)
1073f91e81d3SJohannes Weiner {
1074f91e81d3SJohannes Weiner 	struct zpool *zpool = zswap_find_zpool(entry);
1075f91e81d3SJohannes Weiner 	struct scatterlist input, output;
1076f91e81d3SJohannes Weiner 	struct crypto_acomp_ctx *acomp_ctx;
1077f91e81d3SJohannes Weiner 	u8 *src;
1078f91e81d3SJohannes Weiner 
1079f91e81d3SJohannes Weiner 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1080f91e81d3SJohannes Weiner 	mutex_lock(&acomp_ctx->mutex);
1081f91e81d3SJohannes Weiner 
1082f91e81d3SJohannes Weiner 	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
10839c500835SBarry Song 	/*
10849c500835SBarry Song 	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
10859c500835SBarry Song 	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
10869c500835SBarry Song 	 * resort to copying the buffer to a temporary one.
10879c500835SBarry Song 	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
10889c500835SBarry Song 	 * such as a kmap address of high memory or even a vmap address.
10899c500835SBarry Song 	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
10909c500835SBarry Song 	 * In such cases, we also must copy the buffer to a temporary and lowmem one.
10919c500835SBarry Song 	 */
10929c500835SBarry Song 	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
10939c500835SBarry Song 	    !virt_addr_valid(src)) {
1094f91e81d3SJohannes Weiner 		memcpy(acomp_ctx->buffer, src, entry->length);
1095f91e81d3SJohannes Weiner 		src = acomp_ctx->buffer;
1096f91e81d3SJohannes Weiner 		zpool_unmap_handle(zpool, entry->handle);
1097f91e81d3SJohannes Weiner 	}
1098f91e81d3SJohannes Weiner 
1099f91e81d3SJohannes Weiner 	sg_init_one(&input, src, entry->length);
1100f91e81d3SJohannes Weiner 	sg_init_table(&output, 1);
1101f91e81d3SJohannes Weiner 	sg_set_page(&output, page, PAGE_SIZE, 0);
1102f91e81d3SJohannes Weiner 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1103f91e81d3SJohannes Weiner 	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1104f91e81d3SJohannes Weiner 	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1105f91e81d3SJohannes Weiner 	mutex_unlock(&acomp_ctx->mutex);
1106f91e81d3SJohannes Weiner 
11079c500835SBarry Song 	if (src != acomp_ctx->buffer)
1108f91e81d3SJohannes Weiner 		zpool_unmap_handle(zpool, entry->handle);
1109f91e81d3SJohannes Weiner }
1110f91e81d3SJohannes Weiner 
1111f91e81d3SJohannes Weiner /*********************************
11129986d35dSJohannes Weiner * writeback code
11139986d35dSJohannes Weiner **********************************/
11149986d35dSJohannes Weiner /*
11159986d35dSJohannes Weiner  * Attempts to free an entry by adding a folio to the swap cache,
11169986d35dSJohannes Weiner  * decompressing the entry data into the folio, and issuing a
11179986d35dSJohannes Weiner  * bio write to write the folio back to the swap device.
11189986d35dSJohannes Weiner  *
11199986d35dSJohannes Weiner  * This can be thought of as a "resumed writeback" of the folio
11209986d35dSJohannes Weiner  * to the swap device.  We are basically resuming the same swap
11219986d35dSJohannes Weiner  * writeback path that was intercepted with the zswap_store()
11229986d35dSJohannes Weiner  * in the first place.  After the folio has been decompressed into
11239986d35dSJohannes Weiner  * the swap cache, the compressed version stored by zswap can be
11249986d35dSJohannes Weiner  * freed.
11259986d35dSJohannes Weiner  */
11269986d35dSJohannes Weiner static int zswap_writeback_entry(struct zswap_entry *entry,
11279986d35dSJohannes Weiner 				 swp_entry_t swpentry)
11289986d35dSJohannes Weiner {
11299986d35dSJohannes Weiner 	struct zswap_tree *tree;
11309986d35dSJohannes Weiner 	struct folio *folio;
11319986d35dSJohannes Weiner 	struct mempolicy *mpol;
11329986d35dSJohannes Weiner 	bool folio_was_allocated;
11339986d35dSJohannes Weiner 	struct writeback_control wbc = {
11349986d35dSJohannes Weiner 		.sync_mode = WB_SYNC_NONE,
11359986d35dSJohannes Weiner 	};
11369986d35dSJohannes Weiner 
11379986d35dSJohannes Weiner 	/* try to allocate swap cache folio */
11389986d35dSJohannes Weiner 	mpol = get_task_policy(current);
11399986d35dSJohannes Weiner 	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
11409986d35dSJohannes Weiner 				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
11419986d35dSJohannes Weiner 	if (!folio)
11429986d35dSJohannes Weiner 		return -ENOMEM;
11439986d35dSJohannes Weiner 
11449986d35dSJohannes Weiner 	/*
11459986d35dSJohannes Weiner 	 * Found an existing folio, so we raced with swapin or a concurrent
11469986d35dSJohannes Weiner 	 * shrinker. We generally write back cold folios from zswap, and
11479986d35dSJohannes Weiner 	 * swapin means the folio just became hot, so skip this folio.
11489986d35dSJohannes Weiner 	 * In the unlikely concurrent shrinker case, the entry will be
11499986d35dSJohannes Weiner 	 * unlinked and freed when invalidated by the concurrent shrinker anyway.
11509986d35dSJohannes Weiner 	 */
11519986d35dSJohannes Weiner 	if (!folio_was_allocated) {
11529986d35dSJohannes Weiner 		folio_put(folio);
11539986d35dSJohannes Weiner 		return -EEXIST;
11549986d35dSJohannes Weiner 	}
11559986d35dSJohannes Weiner 
11569986d35dSJohannes Weiner 	/*
11579986d35dSJohannes Weiner 	 * folio is locked, and the swapcache is now secured against
1158f9c0f1c3SChengming Zhou 	 * concurrent swapping to and from the slot, and concurrent
1159f9c0f1c3SChengming Zhou 	 * swapoff so we can safely dereference the zswap tree here.
1160f9c0f1c3SChengming Zhou 	 * Verify that the swap entry hasn't been invalidated and recycled
1161f9c0f1c3SChengming Zhou 	 * behind our backs, to avoid overwriting a new swap folio with
1162f9c0f1c3SChengming Zhou 	 * old compressed data. Only when this is successful can the entry
1163f9c0f1c3SChengming Zhou 	 * be dereferenced.
11649986d35dSJohannes Weiner 	 */
11659986d35dSJohannes Weiner 	tree = swap_zswap_tree(swpentry);
11669986d35dSJohannes Weiner 	spin_lock(&tree->lock);
11679986d35dSJohannes Weiner 	if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
11689986d35dSJohannes Weiner 		spin_unlock(&tree->lock);
11699986d35dSJohannes Weiner 		delete_from_swap_cache(folio);
11709986d35dSJohannes Weiner 		folio_unlock(folio);
11719986d35dSJohannes Weiner 		folio_put(folio);
11729986d35dSJohannes Weiner 		return -ENOMEM;
11739986d35dSJohannes Weiner 	}
11749986d35dSJohannes Weiner 
11759986d35dSJohannes Weiner 	/* Safe to deref entry after the entry is verified above. */
1176a230c20eSChengming Zhou 	zswap_rb_erase(&tree->rbroot, entry);
11779986d35dSJohannes Weiner 	spin_unlock(&tree->lock);
11789986d35dSJohannes Weiner 
11799986d35dSJohannes Weiner 	zswap_decompress(entry, &folio->page);
11809986d35dSJohannes Weiner 
11819986d35dSJohannes Weiner 	count_vm_event(ZSWPWB);
11829986d35dSJohannes Weiner 	if (entry->objcg)
11839986d35dSJohannes Weiner 		count_objcg_event(entry->objcg, ZSWPWB);
11849986d35dSJohannes Weiner 
1185a230c20eSChengming Zhou 	zswap_entry_free(entry);
11869986d35dSJohannes Weiner 
11879986d35dSJohannes Weiner 	/* folio is up to date */
11889986d35dSJohannes Weiner 	folio_mark_uptodate(folio);
11899986d35dSJohannes Weiner 
11909986d35dSJohannes Weiner 	/* move it to the tail of the inactive list after end_writeback */
11919986d35dSJohannes Weiner 	folio_set_reclaim(folio);
11929986d35dSJohannes Weiner 
11939986d35dSJohannes Weiner 	/* start writeback */
11949986d35dSJohannes Weiner 	__swap_writepage(folio, &wbc);
11959986d35dSJohannes Weiner 	folio_put(folio);
11969986d35dSJohannes Weiner 
11979986d35dSJohannes Weiner 	return 0;
11989986d35dSJohannes Weiner }
11999986d35dSJohannes Weiner 
12009986d35dSJohannes Weiner /*********************************
1201b5ba474fSNhat Pham * shrinker functions
1202b5ba474fSNhat Pham **********************************/
1203b5ba474fSNhat Pham static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1204eb23ee4fSJohannes Weiner 				       spinlock_t *lock, void *arg)
1205eb23ee4fSJohannes Weiner {
1206eb23ee4fSJohannes Weiner 	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1207eb23ee4fSJohannes Weiner 	bool *encountered_page_in_swapcache = (bool *)arg;
1208eb23ee4fSJohannes Weiner 	swp_entry_t swpentry;
1209eb23ee4fSJohannes Weiner 	enum lru_status ret = LRU_REMOVED_RETRY;
1210eb23ee4fSJohannes Weiner 	int writeback_result;
1211eb23ee4fSJohannes Weiner 
1212eb23ee4fSJohannes Weiner 	/*
1213f9c0f1c3SChengming Zhou 	 * As soon as we drop the LRU lock, the entry can be freed by
1214f9c0f1c3SChengming Zhou 	 * a concurrent invalidation. This means the following:
1215eb23ee4fSJohannes Weiner 	 *
1216f9c0f1c3SChengming Zhou 	 * 1. We extract the swp_entry_t to the stack, allowing
1217f9c0f1c3SChengming Zhou 	 *    zswap_writeback_entry() to pin the swap entry and
1218f9c0f1c3SChengming Zhou 	 *    then validate the zswap entry against that swap entry's
1219f9c0f1c3SChengming Zhou 	 *    tree using pointer value comparison. Only when that
1220f9c0f1c3SChengming Zhou 	 *    is successful can the entry be dereferenced.
1221f9c0f1c3SChengming Zhou 	 *
1222f9c0f1c3SChengming Zhou 	 * 2. Usually, objects are taken off the LRU for reclaim. In
1223f9c0f1c3SChengming Zhou 	 *    this case this isn't possible, because if reclaim fails
1224f9c0f1c3SChengming Zhou 	 *    for whatever reason, we have no means of knowing if the
1225f9c0f1c3SChengming Zhou 	 *    entry is alive to put it back on the LRU.
1226f9c0f1c3SChengming Zhou 	 *
1227f9c0f1c3SChengming Zhou 	 *    So rotate it before dropping the lock. If the entry is
1228f9c0f1c3SChengming Zhou 	 *    written back or invalidated, the free path will unlink
1229f9c0f1c3SChengming Zhou 	 *    it. For failures, rotation is the right thing as well.
1230eb23ee4fSJohannes Weiner 	 *
1231eb23ee4fSJohannes Weiner 	 *    Temporary failures, where the same entry should be tried
1232eb23ee4fSJohannes Weiner 	 *    again immediately, almost never happen for this shrinker.
1233eb23ee4fSJohannes Weiner 	 *    We don't do any trylocking; -ENOMEM comes closest,
1234eb23ee4fSJohannes Weiner 	 *    but that's extremely rare and doesn't happen spuriously
1235eb23ee4fSJohannes Weiner 	 *    either. Don't bother distinguishing this case.
1236eb23ee4fSJohannes Weiner 	 */
1237eb23ee4fSJohannes Weiner 	list_move_tail(item, &l->list);
1238eb23ee4fSJohannes Weiner 
1239eb23ee4fSJohannes Weiner 	/*
1240eb23ee4fSJohannes Weiner 	 * Once the lru lock is dropped, the entry might get freed. The
1241eb23ee4fSJohannes Weiner 	 * swpentry is copied to the stack, and entry isn't deref'd again
1242eb23ee4fSJohannes Weiner 	 * until the entry is verified to still be alive in the tree.
1243eb23ee4fSJohannes Weiner 	 */
1244eb23ee4fSJohannes Weiner 	swpentry = entry->swpentry;
1245eb23ee4fSJohannes Weiner 
1246eb23ee4fSJohannes Weiner 	/*
1247eb23ee4fSJohannes Weiner 	 * It's safe to drop the lock here because we return either
1248eb23ee4fSJohannes Weiner 	 * LRU_REMOVED_RETRY or LRU_RETRY.
1249eb23ee4fSJohannes Weiner 	 */
1250eb23ee4fSJohannes Weiner 	spin_unlock(lock);
1251eb23ee4fSJohannes Weiner 
1252eb23ee4fSJohannes Weiner 	writeback_result = zswap_writeback_entry(entry, swpentry);
1253eb23ee4fSJohannes Weiner 
1254eb23ee4fSJohannes Weiner 	if (writeback_result) {
1255eb23ee4fSJohannes Weiner 		zswap_reject_reclaim_fail++;
1256eb23ee4fSJohannes Weiner 		ret = LRU_RETRY;
1257eb23ee4fSJohannes Weiner 
1258eb23ee4fSJohannes Weiner 		/*
1259eb23ee4fSJohannes Weiner 		 * Encountering a page already in swap cache is a sign that we are shrinking
1260eb23ee4fSJohannes Weiner 		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
1261eb23ee4fSJohannes Weiner 		 * shrinker context).
1262eb23ee4fSJohannes Weiner 		 */
1263b49547adSChengming Zhou 		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1264b49547adSChengming Zhou 			ret = LRU_STOP;
1265eb23ee4fSJohannes Weiner 			*encountered_page_in_swapcache = true;
1266b49547adSChengming Zhou 		}
1267eb23ee4fSJohannes Weiner 	} else {
1268eb23ee4fSJohannes Weiner 		zswap_written_back_pages++;
1269eb23ee4fSJohannes Weiner 	}
1270eb23ee4fSJohannes Weiner 
1271eb23ee4fSJohannes Weiner 	spin_lock(lock);
1272eb23ee4fSJohannes Weiner 	return ret;
1273eb23ee4fSJohannes Weiner }
1274b5ba474fSNhat Pham 
1275b5ba474fSNhat Pham static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1276b5ba474fSNhat Pham 		struct shrink_control *sc)
1277b5ba474fSNhat Pham {
1278b5ba474fSNhat Pham 	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1279b5ba474fSNhat Pham 	unsigned long shrink_ret, nr_protected, lru_size;
1280b5ba474fSNhat Pham 	bool encountered_page_in_swapcache = false;
1281b5ba474fSNhat Pham 
1282501a06feSNhat Pham 	if (!zswap_shrinker_enabled ||
1283501a06feSNhat Pham 			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1284b5ba474fSNhat Pham 		sc->nr_scanned = 0;
1285b5ba474fSNhat Pham 		return SHRINK_STOP;
1286b5ba474fSNhat Pham 	}
1287b5ba474fSNhat Pham 
1288b5ba474fSNhat Pham 	nr_protected =
1289b5ba474fSNhat Pham 		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1290e35606e4SChengming Zhou 	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
1291b5ba474fSNhat Pham 
1292b5ba474fSNhat Pham 	/*
1293b5ba474fSNhat Pham 	 * Abort if we are shrinking into the protected region.
1294b5ba474fSNhat Pham 	 *
1295b5ba474fSNhat Pham 	 * This short-circuiting is necessary because if we have too many
1296b5ba474fSNhat Pham 	 * concurrent reclaimers getting the freeable zswap object counts at
1297b5ba474fSNhat Pham 	 * the same time (before any of them has made reasonable progress),
1298b5ba474fSNhat Pham 	 * the total number of reclaimed objects might be more than the number
1299b5ba474fSNhat Pham 	 * of unprotected objects (i.e. the reclaimers will reclaim into the
1300b5ba474fSNhat Pham 	 * protected area of the zswap LRU).
1301b5ba474fSNhat Pham 	 */
1302b5ba474fSNhat Pham 	if (nr_protected >= lru_size - sc->nr_to_scan) {
1303b5ba474fSNhat Pham 		sc->nr_scanned = 0;
1304b5ba474fSNhat Pham 		return SHRINK_STOP;
1305b5ba474fSNhat Pham 	}
1306b5ba474fSNhat Pham 
1307e35606e4SChengming Zhou 	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1308b5ba474fSNhat Pham 		&encountered_page_in_swapcache);
1309b5ba474fSNhat Pham 
1310b5ba474fSNhat Pham 	if (encountered_page_in_swapcache)
1311b5ba474fSNhat Pham 		return SHRINK_STOP;
1312b5ba474fSNhat Pham 
1313b5ba474fSNhat Pham 	return shrink_ret ? shrink_ret : SHRINK_STOP;
1314b5ba474fSNhat Pham }
1315b5ba474fSNhat Pham 
1316b5ba474fSNhat Pham static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1317b5ba474fSNhat Pham 		struct shrink_control *sc)
1318b5ba474fSNhat Pham {
1319b5ba474fSNhat Pham 	struct mem_cgroup *memcg = sc->memcg;
1320b5ba474fSNhat Pham 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1321b5ba474fSNhat Pham 	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
1322b5ba474fSNhat Pham 
1323501a06feSNhat Pham 	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1324b5ba474fSNhat Pham 		return 0;
1325b5ba474fSNhat Pham 
132630fb6a8dSJohannes Weiner 	/*
132730fb6a8dSJohannes Weiner 	 * The shrinker resumes swap writeback, which will enter block
132830fb6a8dSJohannes Weiner 	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
132930fb6a8dSJohannes Weiner 	 * rules (may_enter_fs()), which apply on a per-folio basis.
133030fb6a8dSJohannes Weiner 	 */
133130fb6a8dSJohannes Weiner 	if (!gfp_has_io_fs(sc->gfp_mask))
133230fb6a8dSJohannes Weiner 		return 0;
133330fb6a8dSJohannes Weiner 
1334*682886ecSJohannes Weiner 	/*
1335*682886ecSJohannes Weiner 	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1336*682886ecSJohannes Weiner 	 * have them per-node and thus per-lruvec. Careful if memcg is
1337*682886ecSJohannes Weiner 	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1338*682886ecSJohannes Weiner 	 * for the lruvec, but not for memcg_page_state().
1339*682886ecSJohannes Weiner 	 *
1340*682886ecSJohannes Weiner 	 * Without memcg, use the zswap pool-wide metrics.
1341*682886ecSJohannes Weiner 	 */
1342*682886ecSJohannes Weiner 	if (!mem_cgroup_disabled()) {
13437d7ef0a4SYosry Ahmed 		mem_cgroup_flush_stats(memcg);
1344b5ba474fSNhat Pham 		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1345b5ba474fSNhat Pham 		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1346*682886ecSJohannes Weiner 	} else {
1347bf9b7df2SChengming Zhou 		nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
1348e35606e4SChengming Zhou 		nr_stored = atomic_read(&zswap_nr_stored);
1349*682886ecSJohannes Weiner 	}
1350b5ba474fSNhat Pham 
1351b5ba474fSNhat Pham 	if (!nr_stored)
1352b5ba474fSNhat Pham 		return 0;
1353b5ba474fSNhat Pham 
1354b5ba474fSNhat Pham 	nr_protected =
1355b5ba474fSNhat Pham 		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1356e35606e4SChengming Zhou 	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1357b5ba474fSNhat Pham 	/*
1358b5ba474fSNhat Pham 	 * Reduce the lru size by an estimate of the number of pages
1359b5ba474fSNhat Pham 	 * that should be protected.
1360b5ba474fSNhat Pham 	 */
1361b5ba474fSNhat Pham 	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
1362b5ba474fSNhat Pham 
1363b5ba474fSNhat Pham 	/*
1364b5ba474fSNhat Pham 	 * Scale the number of freeable pages by the memory saving factor.
1365b5ba474fSNhat Pham 	 * This ensures that the better zswap compresses memory, the fewer
1366b5ba474fSNhat Pham 	 * pages we will evict to swap (as it will otherwise incur IO for
1367b5ba474fSNhat Pham 	 * relatively small memory saving).
1368b5ba474fSNhat Pham 	 */
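	/*
	 * For example, with nr_freeable = 300, nr_stored = 300 and
	 * nr_backing = 100 (roughly 3:1 compression), this reports 100:
	 * writing back every object would only free about 100 pages of
	 * pool memory.
	 */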
1369b5ba474fSNhat Pham 	return mult_frac(nr_freeable, nr_backing, nr_stored);
1370b5ba474fSNhat Pham }
1371b5ba474fSNhat Pham 
1372bf9b7df2SChengming Zhou static struct shrinker *zswap_alloc_shrinker(void)
1373b5ba474fSNhat Pham {
1374bf9b7df2SChengming Zhou 	struct shrinker *shrinker;
1375b5ba474fSNhat Pham 
1376bf9b7df2SChengming Zhou 	shrinker =
1377bf9b7df2SChengming Zhou 		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1378bf9b7df2SChengming Zhou 	if (!shrinker)
1379bf9b7df2SChengming Zhou 		return NULL;
1380bf9b7df2SChengming Zhou 
1381bf9b7df2SChengming Zhou 	shrinker->scan_objects = zswap_shrinker_scan;
1382bf9b7df2SChengming Zhou 	shrinker->count_objects = zswap_shrinker_count;
1383bf9b7df2SChengming Zhou 	shrinker->batch = 0;
1384bf9b7df2SChengming Zhou 	shrinker->seeks = DEFAULT_SEEKS;
1385bf9b7df2SChengming Zhou 	return shrinker;
1386b5ba474fSNhat Pham }
1387b5ba474fSNhat Pham 
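/*
 * Write back one zswap entry from each NUMA node's LRU on behalf of @memcg.
 * Returns 0 if anything was reclaimed, -EAGAIN if nothing was, or an error
 * if writeback is disabled for the memcg or the memcg is already offline.
 */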
1388a65b0e76SDomenico Cerasuolo static int shrink_memcg(struct mem_cgroup *memcg)
1389a65b0e76SDomenico Cerasuolo {
1390a65b0e76SDomenico Cerasuolo 	int nid, shrunk = 0;
1391a65b0e76SDomenico Cerasuolo 
1392501a06feSNhat Pham 	if (!mem_cgroup_zswap_writeback_enabled(memcg))
1393501a06feSNhat Pham 		return -EINVAL;
1394501a06feSNhat Pham 
1395a65b0e76SDomenico Cerasuolo 	/*
1396a65b0e76SDomenico Cerasuolo 	 * Skip zombies because their LRUs are reparented and we would be
1397a65b0e76SDomenico Cerasuolo 	 * reclaiming from the parent instead of the dead memcg.
1398a65b0e76SDomenico Cerasuolo 	 */
1399a65b0e76SDomenico Cerasuolo 	if (memcg && !mem_cgroup_online(memcg))
1400a65b0e76SDomenico Cerasuolo 		return -ENOENT;
1401a65b0e76SDomenico Cerasuolo 
1402a65b0e76SDomenico Cerasuolo 	for_each_node_state(nid, N_NORMAL_MEMORY) {
1403a65b0e76SDomenico Cerasuolo 		unsigned long nr_to_walk = 1;
1404a65b0e76SDomenico Cerasuolo 
1405e35606e4SChengming Zhou 		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1406a65b0e76SDomenico Cerasuolo 					    &shrink_memcg_cb, NULL, &nr_to_walk);
1407a65b0e76SDomenico Cerasuolo 	}
1408a65b0e76SDomenico Cerasuolo 	return shrunk ? 0 : -EAGAIN;
1409f999f38bSDomenico Cerasuolo }
1410f999f38bSDomenico Cerasuolo 
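/*
 * Work item queued when the pool limit is hit: iterate memcgs in a
 * round-robin fashion and reclaim from each until the pool can accept new
 * pages again or too many failures occur.
 */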
141145190f01SVitaly Wool static void shrink_worker(struct work_struct *w)
141245190f01SVitaly Wool {
1413a65b0e76SDomenico Cerasuolo 	struct mem_cgroup *memcg;
1414e0228d59SDomenico Cerasuolo 	int ret, failures = 0;
141545190f01SVitaly Wool 
1416a65b0e76SDomenico Cerasuolo 	/* global reclaim will select cgroup in a round-robin fashion. */
1417e0228d59SDomenico Cerasuolo 	do {
1418e35606e4SChengming Zhou 		spin_lock(&zswap_shrink_lock);
1419e35606e4SChengming Zhou 		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1420e35606e4SChengming Zhou 		memcg = zswap_next_shrink;
1421a65b0e76SDomenico Cerasuolo 
1422a65b0e76SDomenico Cerasuolo 		/*
1423a65b0e76SDomenico Cerasuolo 		 * We need to retry if we have gone through a full round trip, or if we
1424a65b0e76SDomenico Cerasuolo 		 * got an offline memcg (or else we risk undoing the effect of the
1425a65b0e76SDomenico Cerasuolo 		 * zswap memcg offlining cleanup callback). This is not catastrophic
1426a65b0e76SDomenico Cerasuolo 		 * per se, but it will keep the now offlined memcg hostage for a while.
1427a65b0e76SDomenico Cerasuolo 		 *
1428a65b0e76SDomenico Cerasuolo 		 * Note that if we got an online memcg, we will keep the extra
1429a65b0e76SDomenico Cerasuolo 		 * reference in case the original reference obtained by mem_cgroup_iter
1430a65b0e76SDomenico Cerasuolo 		 * is dropped by the zswap memcg offlining callback, ensuring that the
1431a65b0e76SDomenico Cerasuolo 		 * memcg is not killed when we are reclaiming.
1432a65b0e76SDomenico Cerasuolo 		 */
1433a65b0e76SDomenico Cerasuolo 		if (!memcg) {
1434e35606e4SChengming Zhou 			spin_unlock(&zswap_shrink_lock);
1435e0228d59SDomenico Cerasuolo 			if (++failures == MAX_RECLAIM_RETRIES)
1436e0228d59SDomenico Cerasuolo 				break;
1437a65b0e76SDomenico Cerasuolo 
1438a65b0e76SDomenico Cerasuolo 			goto resched;
1439e0228d59SDomenico Cerasuolo 		}
1440a65b0e76SDomenico Cerasuolo 
1441a65b0e76SDomenico Cerasuolo 		if (!mem_cgroup_tryget_online(memcg)) {
1442a65b0e76SDomenico Cerasuolo 			/* drop the reference from mem_cgroup_iter() */
1443a65b0e76SDomenico Cerasuolo 			mem_cgroup_iter_break(NULL, memcg);
1444e35606e4SChengming Zhou 			zswap_next_shrink = NULL;
1445e35606e4SChengming Zhou 			spin_unlock(&zswap_shrink_lock);
1446a65b0e76SDomenico Cerasuolo 
1447a65b0e76SDomenico Cerasuolo 			if (++failures == MAX_RECLAIM_RETRIES)
1448a65b0e76SDomenico Cerasuolo 				break;
1449a65b0e76SDomenico Cerasuolo 
1450a65b0e76SDomenico Cerasuolo 			goto resched;
1451a65b0e76SDomenico Cerasuolo 		}
1452e35606e4SChengming Zhou 		spin_unlock(&zswap_shrink_lock);
1453a65b0e76SDomenico Cerasuolo 
1454a65b0e76SDomenico Cerasuolo 		ret = shrink_memcg(memcg);
1455a65b0e76SDomenico Cerasuolo 		/* drop the extra reference */
1456a65b0e76SDomenico Cerasuolo 		mem_cgroup_put(memcg);
1457a65b0e76SDomenico Cerasuolo 
1458a65b0e76SDomenico Cerasuolo 		if (ret == -EINVAL)
1459a65b0e76SDomenico Cerasuolo 			break;
1460a65b0e76SDomenico Cerasuolo 		if (ret && ++failures == MAX_RECLAIM_RETRIES)
1461a65b0e76SDomenico Cerasuolo 			break;
1462a65b0e76SDomenico Cerasuolo 
1463a65b0e76SDomenico Cerasuolo resched:
1464e0228d59SDomenico Cerasuolo 		cond_resched();
1465e0228d59SDomenico Cerasuolo 	} while (!zswap_can_accept());
146645190f01SVitaly Wool }
146745190f01SVitaly Wool 
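/*
 * Return 1 and set *value if the page consists of a single repeated
 * unsigned long value (e.g. a zero-filled page). Comparing the first and
 * last words up front cheaply rejects most pages before the full scan.
 */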
1468a85f878bSSrividya Desireddy static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1469a85f878bSSrividya Desireddy {
1470a85f878bSSrividya Desireddy 	unsigned long *page;
147162bf1258STaejoon Song 	unsigned long val;
147262bf1258STaejoon Song 	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1473a85f878bSSrividya Desireddy 
1474a85f878bSSrividya Desireddy 	page = (unsigned long *)ptr;
147562bf1258STaejoon Song 	val = page[0];
147662bf1258STaejoon Song 
147762bf1258STaejoon Song 	if (val != page[last_pos])
147862bf1258STaejoon Song 		return 0;
147962bf1258STaejoon Song 
148062bf1258STaejoon Song 	for (pos = 1; pos < last_pos; pos++) {
148162bf1258STaejoon Song 		if (val != page[pos])
1482a85f878bSSrividya Desireddy 			return 0;
1483a85f878bSSrividya Desireddy 	}
148462bf1258STaejoon Song 
148562bf1258STaejoon Song 	*value = val;
148662bf1258STaejoon Song 
1487a85f878bSSrividya Desireddy 	return 1;
1488a85f878bSSrividya Desireddy }
1489a85f878bSSrividya Desireddy 
1490a85f878bSSrividya Desireddy static void zswap_fill_page(void *ptr, unsigned long value)
1491a85f878bSSrividya Desireddy {
1492a85f878bSSrividya Desireddy 	unsigned long *page;
1493a85f878bSSrividya Desireddy 
1494a85f878bSSrividya Desireddy 	page = (unsigned long *)ptr;
1495a85f878bSSrividya Desireddy 	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1496a85f878bSSrividya Desireddy }
1497a85f878bSSrividya Desireddy 
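/*
 * Store entry point, called when a folio is being swapped out. Rejects
 * large folios, enforces the cgroup and global pool limits, special-cases
 * same-value filled pages, compresses everything else, and publishes the
 * entry in the per-swapfile tree and on the zswap LRU. Returns false (and
 * invalidates any stale entry at this offset) if the folio must be written
 * to the backing swap device instead.
 */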
149834f4c198SMatthew Wilcox (Oracle) bool zswap_store(struct folio *folio)
14992b281117SSeth Jennings {
15003d2c9087SDavid Hildenbrand 	swp_entry_t swp = folio->swap;
150142c06a0eSJohannes Weiner 	pgoff_t offset = swp_offset(swp);
150244c7c734SChengming Zhou 	struct zswap_tree *tree = swap_zswap_tree(swp);
15032b281117SSeth Jennings 	struct zswap_entry *entry, *dupentry;
1504f4840ccfSJohannes Weiner 	struct obj_cgroup *objcg = NULL;
1505a65b0e76SDomenico Cerasuolo 	struct mem_cgroup *memcg = NULL;
150642c06a0eSJohannes Weiner 
150734f4c198SMatthew Wilcox (Oracle) 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
150834f4c198SMatthew Wilcox (Oracle) 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
15092b281117SSeth Jennings 
151034f4c198SMatthew Wilcox (Oracle) 	/* Large folios aren't supported */
151134f4c198SMatthew Wilcox (Oracle) 	if (folio_test_large(folio))
151242c06a0eSJohannes Weiner 		return false;
15137ba71669SHuang Ying 
1514678e54d4SChengming Zhou 	if (!zswap_enabled)
1515f576a1e8SChengming Zhou 		goto check_old;
1516678e54d4SChengming Zhou 
1517074e3e26SMatthew Wilcox (Oracle) 	objcg = get_obj_cgroup_from_folio(folio);
1518a65b0e76SDomenico Cerasuolo 	if (objcg && !obj_cgroup_may_zswap(objcg)) {
1519a65b0e76SDomenico Cerasuolo 		memcg = get_mem_cgroup_from_objcg(objcg);
1520a65b0e76SDomenico Cerasuolo 		if (shrink_memcg(memcg)) {
1521a65b0e76SDomenico Cerasuolo 			mem_cgroup_put(memcg);
15220bdf0efaSNhat Pham 			goto reject;
1523a65b0e76SDomenico Cerasuolo 		}
1524a65b0e76SDomenico Cerasuolo 		mem_cgroup_put(memcg);
1525a65b0e76SDomenico Cerasuolo 	}
1526f4840ccfSJohannes Weiner 
15272b281117SSeth Jennings 	/* reclaim space if needed */
15282b281117SSeth Jennings 	if (zswap_is_full()) {
15292b281117SSeth Jennings 		zswap_pool_limit_hit++;
153045190f01SVitaly Wool 		zswap_pool_reached_full = true;
1531f4840ccfSJohannes Weiner 		goto shrink;
15322b281117SSeth Jennings 	}
153316e536efSLi Wang 
153445190f01SVitaly Wool 	if (zswap_pool_reached_full) {
153542c06a0eSJohannes Weiner 		if (!zswap_can_accept())
1536e0228d59SDomenico Cerasuolo 			goto shrink;
153742c06a0eSJohannes Weiner 		else
153845190f01SVitaly Wool 			zswap_pool_reached_full = false;
15392b281117SSeth Jennings 	}
15402b281117SSeth Jennings 
15412b281117SSeth Jennings 	/* allocate entry */
1542be7fc97cSJohannes Weiner 	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
15432b281117SSeth Jennings 	if (!entry) {
15442b281117SSeth Jennings 		zswap_reject_kmemcache_fail++;
15452b281117SSeth Jennings 		goto reject;
15462b281117SSeth Jennings 	}
15472b281117SSeth Jennings 
1548a85f878bSSrividya Desireddy 	if (zswap_same_filled_pages_enabled) {
1549be7fc97cSJohannes Weiner 		unsigned long value;
1550be7fc97cSJohannes Weiner 		u8 *src;
1551be7fc97cSJohannes Weiner 
1552be7fc97cSJohannes Weiner 		src = kmap_local_folio(folio, 0);
1553a85f878bSSrividya Desireddy 		if (zswap_is_page_same_filled(src, &value)) {
1554003ae2fbSFabio M. De Francesco 			kunmap_local(src);
1555a85f878bSSrividya Desireddy 			entry->length = 0;
1556a85f878bSSrividya Desireddy 			entry->value = value;
1557a85f878bSSrividya Desireddy 			atomic_inc(&zswap_same_filled_pages);
1558a85f878bSSrividya Desireddy 			goto insert_entry;
1559a85f878bSSrividya Desireddy 		}
1560003ae2fbSFabio M. De Francesco 		kunmap_local(src);
1561a85f878bSSrividya Desireddy 	}
1562a85f878bSSrividya Desireddy 
156342c06a0eSJohannes Weiner 	if (!zswap_non_same_filled_pages_enabled)
1564cb325dddSMaciej S. Szmigiero 		goto freepage;
1565cb325dddSMaciej S. Szmigiero 
1566f1c54846SDan Streetman 	/* if entry is successfully added, it keeps the reference */
1567f1c54846SDan Streetman 	entry->pool = zswap_pool_current_get();
156842c06a0eSJohannes Weiner 	if (!entry->pool)
15692b281117SSeth Jennings 		goto freepage;
15702b281117SSeth Jennings 
1571a65b0e76SDomenico Cerasuolo 	if (objcg) {
1572a65b0e76SDomenico Cerasuolo 		memcg = get_mem_cgroup_from_objcg(objcg);
1573e35606e4SChengming Zhou 		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1574a65b0e76SDomenico Cerasuolo 			mem_cgroup_put(memcg);
1575a65b0e76SDomenico Cerasuolo 			goto put_pool;
1576a65b0e76SDomenico Cerasuolo 		}
1577a65b0e76SDomenico Cerasuolo 		mem_cgroup_put(memcg);
1578a65b0e76SDomenico Cerasuolo 	}
1579a65b0e76SDomenico Cerasuolo 
1580fa9ad6e2SJohannes Weiner 	if (!zswap_compress(folio, entry))
1581fa9ad6e2SJohannes Weiner 		goto put_pool;
15821ec3b5feSBarry Song 
1583a85f878bSSrividya Desireddy insert_entry:
1584be7fc97cSJohannes Weiner 	entry->swpentry = swp;
1585f4840ccfSJohannes Weiner 	entry->objcg = objcg;
1586f4840ccfSJohannes Weiner 	if (objcg) {
1587f4840ccfSJohannes Weiner 		obj_cgroup_charge_zswap(objcg, entry->length);
1588f4840ccfSJohannes Weiner 		/* Account before objcg ref is moved to tree */
1589f4840ccfSJohannes Weiner 		count_objcg_event(objcg, ZSWPOUT);
1590f4840ccfSJohannes Weiner 	}
1591f4840ccfSJohannes Weiner 
15922b281117SSeth Jennings 	/* map */
15932b281117SSeth Jennings 	spin_lock(&tree->lock);
1594ca56489cSDomenico Cerasuolo 	/*
1595f576a1e8SChengming Zhou 	 * The folio may have been dirtied again; invalidate the
1596f576a1e8SChengming Zhou 	 * possibly stale entry before inserting the new entry.
1597ca56489cSDomenico Cerasuolo 	 */
1598f576a1e8SChengming Zhou 	if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
159956c67049SJohannes Weiner 		zswap_invalidate_entry(tree, dupentry);
1600f576a1e8SChengming Zhou 		WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
16012b281117SSeth Jennings 	}
160235499e2bSDomenico Cerasuolo 	if (entry->length) {
1603a65b0e76SDomenico Cerasuolo 		INIT_LIST_HEAD(&entry->lru);
1604e35606e4SChengming Zhou 		zswap_lru_add(&zswap_list_lru, entry);
1605e35606e4SChengming Zhou 		atomic_inc(&zswap_nr_stored);
1606f999f38bSDomenico Cerasuolo 	}
16072b281117SSeth Jennings 	spin_unlock(&tree->lock);
16082b281117SSeth Jennings 
16092b281117SSeth Jennings 	/* update stats */
16102b281117SSeth Jennings 	atomic_inc(&zswap_stored_pages);
1611f1c54846SDan Streetman 	zswap_update_total_size();
1612f6498b77SJohannes Weiner 	count_vm_event(ZSWPOUT);
16132b281117SSeth Jennings 
161442c06a0eSJohannes Weiner 	return true;
16152b281117SSeth Jennings 
1616a65b0e76SDomenico Cerasuolo put_pool:
1617f1c54846SDan Streetman 	zswap_pool_put(entry->pool);
1618f1c54846SDan Streetman freepage:
16192b281117SSeth Jennings 	zswap_entry_cache_free(entry);
16202b281117SSeth Jennings reject:
1621f4840ccfSJohannes Weiner 	if (objcg)
1622f4840ccfSJohannes Weiner 		obj_cgroup_put(objcg);
1623f576a1e8SChengming Zhou check_old:
1624f576a1e8SChengming Zhou 	/*
1625f576a1e8SChengming Zhou 	 * If the zswap store fails or zswap is disabled, we must invalidate the
1626f576a1e8SChengming Zhou 	 * possibly stale entry which was previously stored at this offset.
1627f576a1e8SChengming Zhou 	 * Otherwise, writeback could overwrite the new data in the swapfile.
1628f576a1e8SChengming Zhou 	 */
1629f576a1e8SChengming Zhou 	spin_lock(&tree->lock);
1630f576a1e8SChengming Zhou 	entry = zswap_rb_search(&tree->rbroot, offset);
1631f576a1e8SChengming Zhou 	if (entry)
1632f576a1e8SChengming Zhou 		zswap_invalidate_entry(tree, entry);
1633f576a1e8SChengming Zhou 	spin_unlock(&tree->lock);
163442c06a0eSJohannes Weiner 	return false;
1635f4840ccfSJohannes Weiner 
1636f4840ccfSJohannes Weiner shrink:
1637e35606e4SChengming Zhou 	queue_work(shrink_wq, &zswap_shrink_work);
1638f4840ccfSJohannes Weiner 	goto reject;
16392b281117SSeth Jennings }
16402b281117SSeth Jennings 
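/*
 * Load entry point, called when a page is swapped back in. If an entry
 * exists for this offset, decompress it (or refill the same-value pattern)
 * into the folio. When reading into the swapcache, the zswap copy is freed
 * and the folio is marked dirty so the data is written out again if it is
 * reclaimed later.
 */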
1641ca54f6d8SMatthew Wilcox (Oracle) bool zswap_load(struct folio *folio)
16422b281117SSeth Jennings {
16433d2c9087SDavid Hildenbrand 	swp_entry_t swp = folio->swap;
164442c06a0eSJohannes Weiner 	pgoff_t offset = swp_offset(swp);
1645ca54f6d8SMatthew Wilcox (Oracle) 	struct page *page = &folio->page;
164625cd2414SJohannes Weiner 	bool swapcache = folio_test_swapcache(folio);
164744c7c734SChengming Zhou 	struct zswap_tree *tree = swap_zswap_tree(swp);
16482b281117SSeth Jennings 	struct zswap_entry *entry;
164932acba4cSChengming Zhou 	u8 *dst;
165042c06a0eSJohannes Weiner 
1651ca54f6d8SMatthew Wilcox (Oracle) 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
16522b281117SSeth Jennings 
16532b281117SSeth Jennings 	spin_lock(&tree->lock);
16545b297f70SJohannes Weiner 	entry = zswap_rb_search(&tree->rbroot, offset);
16552b281117SSeth Jennings 	if (!entry) {
16562b281117SSeth Jennings 		spin_unlock(&tree->lock);
165742c06a0eSJohannes Weiner 		return false;
16582b281117SSeth Jennings 	}
165925cd2414SJohannes Weiner 	/*
166025cd2414SJohannes Weiner 	 * When reading into the swapcache, invalidate our entry. The
166125cd2414SJohannes Weiner 	 * swapcache can be the authoritative owner of the page and
166225cd2414SJohannes Weiner 	 * its mappings, and the pressure that results from having two
166325cd2414SJohannes Weiner 	 * in-memory copies outweighs any benefits of caching the
166425cd2414SJohannes Weiner 	 * compression work.
166525cd2414SJohannes Weiner 	 *
166625cd2414SJohannes Weiner 	 * (Most swapins go through the swapcache. The notable
166725cd2414SJohannes Weiner 	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
166825cd2414SJohannes Weiner 	 * files, which reads into a private page and may free it if
166925cd2414SJohannes Weiner 	 * the fault fails. We remain the primary owner of the entry.)
167025cd2414SJohannes Weiner 	 */
167125cd2414SJohannes Weiner 	if (swapcache)
1672a230c20eSChengming Zhou 		zswap_rb_erase(&tree->rbroot, entry);
16732b281117SSeth Jennings 	spin_unlock(&tree->lock);
16742b281117SSeth Jennings 
167566447fd0SChengming Zhou 	if (entry->length)
1676ff2972aaSJohannes Weiner 		zswap_decompress(entry, page);
167766447fd0SChengming Zhou 	else {
1678003ae2fbSFabio M. De Francesco 		dst = kmap_local_page(page);
1679a85f878bSSrividya Desireddy 		zswap_fill_page(dst, entry->value);
1680003ae2fbSFabio M. De Francesco 		kunmap_local(dst);
1681a85f878bSSrividya Desireddy 	}
1682a85f878bSSrividya Desireddy 
1683f6498b77SJohannes Weiner 	count_vm_event(ZSWPIN);
1684f4840ccfSJohannes Weiner 	if (entry->objcg)
1685f4840ccfSJohannes Weiner 		count_objcg_event(entry->objcg, ZSWPIN);
1686c75f5c1eSChengming Zhou 
168725cd2414SJohannes Weiner 	if (swapcache) {
1688a230c20eSChengming Zhou 		zswap_entry_free(entry);
1689c2e2ba77SChengming Zhou 		folio_mark_dirty(folio);
169025cd2414SJohannes Weiner 	}
1691c2e2ba77SChengming Zhou 
169266447fd0SChengming Zhou 	return true;
16932b281117SSeth Jennings }
16942b281117SSeth Jennings 
16950827a1fbSChengming Zhou void zswap_invalidate(swp_entry_t swp)
16962b281117SSeth Jennings {
16970827a1fbSChengming Zhou 	pgoff_t offset = swp_offset(swp);
16980827a1fbSChengming Zhou 	struct zswap_tree *tree = swap_zswap_tree(swp);
16992b281117SSeth Jennings 	struct zswap_entry *entry;
17002b281117SSeth Jennings 
17012b281117SSeth Jennings 	spin_lock(&tree->lock);
17022b281117SSeth Jennings 	entry = zswap_rb_search(&tree->rbroot, offset);
170306ed2289SJohannes Weiner 	if (entry)
1704b9c91c43SYosry Ahmed 		zswap_invalidate_entry(tree, entry);
17052b281117SSeth Jennings 	spin_unlock(&tree->lock);
17062b281117SSeth Jennings }
17072b281117SSeth Jennings 
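/*
 * Called when a swap device of @nr_pages slots is enabled: allocate one
 * rbtree (with its own lock) per SWAP_ADDRESS_SPACE_PAGES worth of swap
 * slots for this swap type.
 */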
170844c7c734SChengming Zhou int zswap_swapon(int type, unsigned long nr_pages)
170942c06a0eSJohannes Weiner {
171044c7c734SChengming Zhou 	struct zswap_tree *trees, *tree;
171144c7c734SChengming Zhou 	unsigned int nr, i;
171242c06a0eSJohannes Weiner 
171344c7c734SChengming Zhou 	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
171444c7c734SChengming Zhou 	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
171544c7c734SChengming Zhou 	if (!trees) {
171642c06a0eSJohannes Weiner 		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1717bb29fd77SChengming Zhou 		return -ENOMEM;
171842c06a0eSJohannes Weiner 	}
171942c06a0eSJohannes Weiner 
172044c7c734SChengming Zhou 	for (i = 0; i < nr; i++) {
172144c7c734SChengming Zhou 		tree = trees + i;
172242c06a0eSJohannes Weiner 		tree->rbroot = RB_ROOT;
172342c06a0eSJohannes Weiner 		spin_lock_init(&tree->lock);
172444c7c734SChengming Zhou 	}
172544c7c734SChengming Zhou 
172644c7c734SChengming Zhou 	nr_zswap_trees[type] = nr;
172744c7c734SChengming Zhou 	zswap_trees[type] = trees;
1728bb29fd77SChengming Zhou 	return 0;
172942c06a0eSJohannes Weiner }
173042c06a0eSJohannes Weiner 
173142c06a0eSJohannes Weiner void zswap_swapoff(int type)
17322b281117SSeth Jennings {
173344c7c734SChengming Zhou 	struct zswap_tree *trees = zswap_trees[type];
173444c7c734SChengming Zhou 	unsigned int i;
17352b281117SSeth Jennings 
173644c7c734SChengming Zhou 	if (!trees)
17372b281117SSeth Jennings 		return;
17382b281117SSeth Jennings 
173983e68f25SYosry Ahmed 	/* try_to_unuse() invalidated all the entries already */
174083e68f25SYosry Ahmed 	for (i = 0; i < nr_zswap_trees[type]; i++)
174183e68f25SYosry Ahmed 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));
174244c7c734SChengming Zhou 
174344c7c734SChengming Zhou 	kvfree(trees);
174444c7c734SChengming Zhou 	nr_zswap_trees[type] = 0;
1745aa9bca05SWeijie Yang 	zswap_trees[type] = NULL;
17462b281117SSeth Jennings }
17472b281117SSeth Jennings 
17482b281117SSeth Jennings /*********************************
17492b281117SSeth Jennings * debugfs functions
17502b281117SSeth Jennings **********************************/
17512b281117SSeth Jennings #ifdef CONFIG_DEBUG_FS
17522b281117SSeth Jennings #include <linux/debugfs.h>
17532b281117SSeth Jennings 
17542b281117SSeth Jennings static struct dentry *zswap_debugfs_root;
17552b281117SSeth Jennings 
1756141fdeecSLiu Shixin static int zswap_debugfs_init(void)
17572b281117SSeth Jennings {
17582b281117SSeth Jennings 	if (!debugfs_initialized())
17592b281117SSeth Jennings 		return -ENODEV;
17602b281117SSeth Jennings 
17612b281117SSeth Jennings 	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
17622b281117SSeth Jennings 
17630825a6f9SJoe Perches 	debugfs_create_u64("pool_limit_hit", 0444,
17642b281117SSeth Jennings 			   zswap_debugfs_root, &zswap_pool_limit_hit);
17650825a6f9SJoe Perches 	debugfs_create_u64("reject_reclaim_fail", 0444,
17662b281117SSeth Jennings 			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
17670825a6f9SJoe Perches 	debugfs_create_u64("reject_alloc_fail", 0444,
17682b281117SSeth Jennings 			   zswap_debugfs_root, &zswap_reject_alloc_fail);
17690825a6f9SJoe Perches 	debugfs_create_u64("reject_kmemcache_fail", 0444,
17702b281117SSeth Jennings 			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1771cb61dad8SNhat Pham 	debugfs_create_u64("reject_compress_fail", 0444,
1772cb61dad8SNhat Pham 			   zswap_debugfs_root, &zswap_reject_compress_fail);
17730825a6f9SJoe Perches 	debugfs_create_u64("reject_compress_poor", 0444,
17742b281117SSeth Jennings 			   zswap_debugfs_root, &zswap_reject_compress_poor);
17750825a6f9SJoe Perches 	debugfs_create_u64("written_back_pages", 0444,
17762b281117SSeth Jennings 			   zswap_debugfs_root, &zswap_written_back_pages);
17770825a6f9SJoe Perches 	debugfs_create_u64("pool_total_size", 0444,
177812d79d64SDan Streetman 			   zswap_debugfs_root, &zswap_pool_total_size);
17790825a6f9SJoe Perches 	debugfs_create_atomic_t("stored_pages", 0444,
17802b281117SSeth Jennings 				zswap_debugfs_root, &zswap_stored_pages);
1781a85f878bSSrividya Desireddy 	debugfs_create_atomic_t("same_filled_pages", 0444,
1782a85f878bSSrividya Desireddy 				zswap_debugfs_root, &zswap_same_filled_pages);
17832b281117SSeth Jennings 
17842b281117SSeth Jennings 	return 0;
17852b281117SSeth Jennings }
17862b281117SSeth Jennings #else
1787141fdeecSLiu Shixin static int zswap_debugfs_init(void)
17882b281117SSeth Jennings {
17892b281117SSeth Jennings 	return 0;
17902b281117SSeth Jennings }
17912b281117SSeth Jennings #endif
17922b281117SSeth Jennings 
17932b281117SSeth Jennings /*********************************
17942b281117SSeth Jennings * module init and exit
17952b281117SSeth Jennings **********************************/
1796141fdeecSLiu Shixin static int zswap_setup(void)
17972b281117SSeth Jennings {
1798f1c54846SDan Streetman 	struct zswap_pool *pool;
1799ad7ed770SSebastian Andrzej Siewior 	int ret;
180060105e12SMinchan Kim 
1801b7919122SLiu Shixin 	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1802b7919122SLiu Shixin 	if (!zswap_entry_cache) {
18032b281117SSeth Jennings 		pr_err("entry cache creation failed\n");
1804f1c54846SDan Streetman 		goto cache_fail;
18052b281117SSeth Jennings 	}
1806f1c54846SDan Streetman 
1807cab7a7e5SSebastian Andrzej Siewior 	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1808cab7a7e5SSebastian Andrzej Siewior 				      "mm/zswap_pool:prepare",
1809cab7a7e5SSebastian Andrzej Siewior 				      zswap_cpu_comp_prepare,
1810cab7a7e5SSebastian Andrzej Siewior 				      zswap_cpu_comp_dead);
1811cab7a7e5SSebastian Andrzej Siewior 	if (ret)
1812cab7a7e5SSebastian Andrzej Siewior 		goto hp_fail;
1813cab7a7e5SSebastian Andrzej Siewior 
1814bf9b7df2SChengming Zhou 	shrink_wq = alloc_workqueue("zswap-shrink",
1815bf9b7df2SChengming Zhou 			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1816bf9b7df2SChengming Zhou 	if (!shrink_wq)
1817bf9b7df2SChengming Zhou 		goto shrink_wq_fail;
1818bf9b7df2SChengming Zhou 
1819e35606e4SChengming Zhou 	zswap_shrinker = zswap_alloc_shrinker();
1820e35606e4SChengming Zhou 	if (!zswap_shrinker)
1821bf9b7df2SChengming Zhou 		goto shrinker_fail;
1822e35606e4SChengming Zhou 	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1823bf9b7df2SChengming Zhou 		goto lru_fail;
1824e35606e4SChengming Zhou 	shrinker_register(zswap_shrinker);
1825bf9b7df2SChengming Zhou 
1826e35606e4SChengming Zhou 	INIT_WORK(&zswap_shrink_work, shrink_worker);
1827bf9b7df2SChengming Zhou 
1828f1c54846SDan Streetman 	pool = __zswap_pool_create_fallback();
1829ae3d89a7SDan Streetman 	if (pool) {
1830f1c54846SDan Streetman 		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1831b8cf32dcSYosry Ahmed 			zpool_get_type(pool->zpools[0]));
1832f1c54846SDan Streetman 		list_add(&pool->list, &zswap_pools);
1833ae3d89a7SDan Streetman 		zswap_has_pool = true;
1834ae3d89a7SDan Streetman 	} else {
1835ae3d89a7SDan Streetman 		pr_err("pool creation failed\n");
1836ae3d89a7SDan Streetman 		zswap_enabled = false;
1837ae3d89a7SDan Streetman 	}
183860105e12SMinchan Kim 
18392b281117SSeth Jennings 	if (zswap_debugfs_init())
18402b281117SSeth Jennings 		pr_warn("debugfs initialization failed\n");
18419021ccecSLiu Shixin 	zswap_init_state = ZSWAP_INIT_SUCCEED;
18422b281117SSeth Jennings 	return 0;
1843f1c54846SDan Streetman 
1844bf9b7df2SChengming Zhou lru_fail:
1845e35606e4SChengming Zhou 	shrinker_free(zswap_shrinker);
1846bf9b7df2SChengming Zhou shrinker_fail:
1847bf9b7df2SChengming Zhou 	destroy_workqueue(shrink_wq);
1848bf9b7df2SChengming Zhou shrink_wq_fail:
1849bf9b7df2SChengming Zhou 	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1850cab7a7e5SSebastian Andrzej Siewior hp_fail:
1851b7919122SLiu Shixin 	kmem_cache_destroy(zswap_entry_cache);
1852f1c54846SDan Streetman cache_fail:
1853d7b028f5SDan Streetman 	/* if built-in, we aren't unloaded on failure; don't allow use */
18549021ccecSLiu Shixin 	zswap_init_state = ZSWAP_INIT_FAILED;
1855d7b028f5SDan Streetman 	zswap_enabled = false;
18562b281117SSeth Jennings 	return -ENOMEM;
18572b281117SSeth Jennings }
1858141fdeecSLiu Shixin 
1859141fdeecSLiu Shixin static int __init zswap_init(void)
1860141fdeecSLiu Shixin {
1861141fdeecSLiu Shixin 	if (!zswap_enabled)
1862141fdeecSLiu Shixin 		return 0;
1863141fdeecSLiu Shixin 	return zswap_setup();
1864141fdeecSLiu Shixin }
18652b281117SSeth Jennings /* must be late so crypto has time to come up */
1866141fdeecSLiu Shixin late_initcall(zswap_init);
18672b281117SSeth Jennings 
186868386da8SSeth Jennings MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
18692b281117SSeth Jennings MODULE_DESCRIPTION("Compressed cache for swap pages");
1870