// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <crypto/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>
#include <linux/zsmalloc.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of pages currently stored in zswap */
atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);
/* The number of incompressible pages currently stored in zswap */
static atomic_long_t zswap_stored_incompressible_pages = ATOMIC_LONG_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Load or writeback failed due to decompression failure */
static u64 zswap_decompress_fail;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zs_pool *zs_pool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zsmalloc allocation handle that stores the compressed page data
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle on the global LRU list used to evict entries
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};


static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zsmalloc */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

/* One swap address space for each 64M swap space */
#define ZSWAP_ADDRESS_SPACE_SHIFT 14
#define ZSWAP_ADDRESS_SPACE_PAGES (1 << ZSWAP_ADDRESS_SPACE_SHIFT)
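/*
 * Worked example (assuming the common 4K PAGE_SIZE): one tree covers
 * 1 << 14 = 16384 swap slots, i.e. 16384 * 4K = 64M of swap space.
 */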
static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> ZSWAP_ADDRESS_SPACE_SHIFT];
}

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s\n", msg, (p)->tfm_name)

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static void acomp_ctx_free(struct crypto_acomp_ctx *acomp_ctx)
{
	if (!acomp_ctx)
		return;

	/*
	 * If there was an error in allocating @acomp_ctx->req, it
	 * would be set to NULL.
	 */
	if (acomp_ctx->req)
		acomp_request_free(acomp_ctx->req);

	acomp_ctx->req = NULL;

	/*
	 * We have to handle both cases here: an error pointer return from
	 * crypto_alloc_acomp_node(); and a) NULL initialization by zswap, or
	 * b) NULL assignment done in a previous call to acomp_ctx_free().
	 */
	if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
		crypto_free_acomp(acomp_ctx->acomp);

	acomp_ctx->acomp = NULL;

	kfree(acomp_ctx->buffer);
	acomp_ctx->buffer = NULL;
}

static struct zswap_pool *zswap_pool_create(char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	int ret, cpu;

	if (!zswap_has_pool && !strcmp(compressor, ZSWAP_PARAM_UNSET))
		return NULL;

	pool = kzalloc_obj(*pool);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zs_pool = zs_create_pool(name);
	if (!pool->zs_pool)
		goto error;

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	/* Many things rely on the zero-initialization. */
	pool->acomp_ctx = alloc_percpu_gfp(*pool->acomp_ctx,
					   GFP_KERNEL | __GFP_ZERO);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	/*
	 * This is serialized against CPU hotplug operations. Hence, cores
	 * cannot be offlined until this finishes.
	 */
	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);

	/*
	 * cpuhp_state_add_instance() will not cleanup on failure since
	 * we don't register a hotunplug callback.
	 */
	if (ret)
		goto cpuhp_add_fail;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);

cpuhp_add_fail:
	for_each_possible_cpu(cpu)
		acomp_ctx_free(per_cpu_ptr(pool->acomp_ctx, cpu));
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zs_pool)
		zs_destroy_pool(pool->zs_pool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_acomp(zswap_compressor, 0, 0) &&
	    strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
	}

	/* Default compressor should be available. Kconfig bug? */
	if (WARN_ON_ONCE(!crypto_has_acomp(zswap_compressor, 0, 0))) {
		zswap_compressor = ZSWAP_PARAM_UNSET;
		return NULL;
	}

	return zswap_pool_create(zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int cpu;

	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);

	for_each_possible_cpu(cpu)
		acomp_ctx_free(per_cpu_ptr(pool->acomp_ctx, cpu));

	free_percpu(pool->acomp_ctx);

	zs_destroy_pool(pool->zs_pool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

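/*
 * Take a reference only if the pool is still live; returns 0 if the pool's
 * reference count has already dropped to zero and the pool is about to be
 * destroyed.
 */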
static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

/* The caller must already have a reference. */
static void zswap_pool_get(struct zswap_pool *pool)
{
	percpu_ref_get(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_tryget(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_tryget(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

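/*
 * Sum of the backing memory consumed by all pools, in pages, as reported
 * by zsmalloc. This is the figure that the pool limit and the acceptance
 * threshold are compared against.
 */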
unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zs_get_total_pages(pool->zs_pool);
	rcu_read_unlock();

	return total;
}

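/*
 * Returns true while stores should be rejected on size grounds. Once the
 * pool grows past zswap_max_pages(), rejection stays in effect until usage
 * falls back below the acceptance threshold, giving the hysteresis
 * described by the accept_threshold_percent tunable.
 */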
static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}

/*********************************
* param callbacks
**********************************/

static int zswap_compressor_param_set(const char *val, const struct kernel_param *kp)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	bool create_pool = false;
	int ret = 0;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* Handled in zswap_setup() */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool || strcmp(s, *(char **)kp->arg))
			create_pool = true;
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	if (!create_pool)
		return ret;

	if (!crypto_has_acomp(s, 0, 0)) {
		pr_err("compressor %s not available\n", s);
		return -ENOENT;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(s);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(s);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/*
		 * Add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	/*
	 * Drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

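/*
 * The entry was allocated from a slab page; the NUMA node of that page is
 * used to place the entry on the matching per-node LRU list.
 */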
static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining:
	 *
	 * 1. list_lru_add() is called before list_lru_one is dead. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after list_lru_one is dead. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		rcu_read_lock();
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
		rcu_read_unlock();
	}
}

/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * of the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zsmalloc allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	zswap_lru_del(&zswap_list_lru, entry);
	zs_free(entry->pool->zs_pool, entry->handle);
	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	if (entry->length == PAGE_SIZE)
		atomic_long_dec(&zswap_stored_incompressible_pages);
	zswap_entry_cache_free(entry);
	atomic_long_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
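/*
 * CPU hotplug "prepare" callback: allocate the per-CPU scratch buffer and
 * the crypto acomp transform/request for @cpu the first time that CPU is
 * brought up for this pool. Later online/offline cycles reuse the existing
 * context, which is only torn down when the pool itself is destroyed.
 */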
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	int ret = -ENOMEM;

	/*
	 * To handle cases where the CPU goes through online-offline-online
	 * transitions, we return if the acomp_ctx has already been initialized.
	 */
	if (acomp_ctx->acomp) {
		WARN_ON_ONCE(IS_ERR(acomp_ctx->acomp));
		return 0;
	}

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return ret;

	/*
	 * In case of an error, crypto_alloc_acomp_node() returns an
	 * error pointer, never NULL.
	 */
	acomp_ctx->acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp_ctx->acomp)) {
		pr_err("could not alloc crypto acomp %s : %pe\n",
		       pool->tfm_name, acomp_ctx->acomp);
		ret = PTR_ERR(acomp_ctx->acomp);
		goto fail;
	}

	/* acomp_request_alloc() returns NULL in case of an error. */
	acomp_ctx->req = acomp_request_alloc(acomp_ctx->acomp);
	if (!acomp_ctx->req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		goto fail;
	}

	crypto_init_wait(&acomp_ctx->wait);

	/*
	 * If the acomp backend is an async driver, crypto_req_done() will wake
	 * up crypto_wait_req(); if the backend is a synchronous scomp wrapper,
	 * the callback is never invoked and crypto_wait_req() returns without
	 * blocking.
	 */
	acomp_request_set_callback(acomp_ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	mutex_init(&acomp_ctx->mutex);
	return 0;

fail:
	acomp_ctx_free(acomp_ctx);
	return ret;
}

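/*
 * Compress @page into @pool and, on success, record the allocation handle
 * and compressed length in @entry. Pages that do not compress below
 * PAGE_SIZE are stored uncompressed (length == PAGE_SIZE) so they can still
 * be written back in LRU order, unless writeback is disabled for the memcg,
 * in which case the store is rejected.
 */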
static bool zswap_compress(struct page *page, struct zswap_entry *entry,
			   struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	gfp_t gfp;
	u8 *dst;
	bool mapped = false;

	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	sg_init_one(&output, dst, PAGE_SIZE);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look odd that we submit an asynchronous request and then
	 * wait for its completion synchronously, making the operation
	 * effectively synchronous.
	 * In theory, acomp allows a user to submit multiple requests to one
	 * acomp instance and have them completed concurrently. But zswap
	 * stores and loads one page at a time, so a single thread has no way
	 * to issue a second request before the first one completes. Threads
	 * running on different CPUs use different acomp instances, however,
	 * so multiple threads can still (de)compress in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	/*
	 * If a page cannot be compressed into a size smaller than PAGE_SIZE,
	 * save the content as is without compression, to keep the LRU order
	 * of writebacks. If writeback is disabled, reject the page since it
	 * only adds metadata overhead. swap_writeout() will put the page back
	 * on the active LRU list in that case.
	 */
	if (comp_ret || !dlen || dlen >= PAGE_SIZE) {
		rcu_read_lock();
		if (!mem_cgroup_zswap_writeback_enabled(
					folio_memcg(page_folio(page)))) {
			rcu_read_unlock();
			comp_ret = comp_ret ? comp_ret : -EINVAL;
			goto unlock;
		}
		rcu_read_unlock();
		comp_ret = 0;
		dlen = PAGE_SIZE;
		dst = kmap_local_page(page);
		mapped = true;
	}

	gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
	handle = zs_malloc(pool->zs_pool, dlen, gfp, page_to_nid(page));
	if (IS_ERR_VALUE(handle)) {
		alloc_ret = PTR_ERR((void *)handle);
		goto unlock;
	}

	zs_obj_write(pool->zs_pool, handle, dst, dlen);
	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (mapped)
		kunmap_local(dst);
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}

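/*
 * Copy or decompress @entry's data into @folio. Entries stored verbatim
 * (length == PAGE_SIZE) are copied directly; everything else goes through
 * the crypto acomp API. Returns false if decompression fails or does not
 * produce exactly PAGE_SIZE bytes.
 */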
static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zswap_pool *pool = entry->pool;
	struct scatterlist input[2]; /* zsmalloc returns an SG list of 1-2 entries */
	struct scatterlist output;
	struct crypto_acomp_ctx *acomp_ctx;
	int ret = 0, dlen;

	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);
	zs_obj_read_sg_begin(pool->zs_pool, entry->handle, input, entry->length);

	/* zswap entries of length PAGE_SIZE are not compressed. */
	if (entry->length == PAGE_SIZE) {
		void *dst;

		WARN_ON_ONCE(input->length != PAGE_SIZE);

		dst = kmap_local_folio(folio, 0);
		memcpy_from_sglist(dst, input, 0, PAGE_SIZE);
		dlen = PAGE_SIZE;
		kunmap_local(dst);
		flush_dcache_folio(folio);
	} else {
		sg_init_table(&output, 1);
		sg_set_folio(&output, folio, PAGE_SIZE, 0);
		acomp_request_set_params(acomp_ctx->req, input, &output,
					 entry->length, PAGE_SIZE);
		ret = crypto_acomp_decompress(acomp_ctx->req);
		ret = crypto_wait_req(ret, &acomp_ctx->wait);
		dlen = acomp_ctx->req->dlen;
	}

	zs_obj_read_sg_end(pool->zs_pool, entry->handle);
	mutex_unlock(&acomp_ctx->mutex);

	if (!ret && dlen == PAGE_SIZE)
		return true;

	zswap_decompress_fail++;
	pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n",
			     swp_type(entry->swpentry),
			     swp_offset(entry->swpentry),
			     entry->pool->tfm_name,
			     entry->length, dlen);
	return false;
}


/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct swap_info_struct *si;
	int ret = 0;

	/* try to allocate swap cache folio */
	si = get_swap_device(swpentry);
	if (!si)
		return -EEXIST;

	mpol = get_task_policy(current);
	folio = swap_cache_alloc_folio(swpentry, GFP_KERNEL, mpol,
				       NO_INTERLEAVE_INDEX, &folio_was_allocated);
	put_swap_device(si);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_load(tree, offset)) {
		ret = -ENOMEM;
		goto out;
	}

	if (!zswap_decompress(entry, folio)) {
		ret = -EIO;
		goto out;
	}

	xa_erase(tree, offset);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPWB, 1);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, NULL);

out:
	if (ret && ret != -EEXIST) {
		swap_cache_del_folio(folio);
		folio_unlock(folio);
	}
	folio_put(folio);
	return ret;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e. lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the
 *    LRU in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the less gains we
 *    can expect from writeback. We scale down the number of objects available
 *    for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set,
	 * give it a second chance. Only clear the referenced bit and rotate
	 * it in zswap's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}


	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP.
	 */
	spin_unlock(&l->lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	return ret;
}


static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
					  &encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_long_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that were recently
	 * swapped in from disk. The idea is that had we protected zswap's LRU
	 * by this number of pages, these disk swapins would not have happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

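	/*
	 * Walk at most one LRU entry per node; @scanned records whether any
	 * node actually had an entry to look at, @shrunk how many of those
	 * entries were successfully written back.
	 */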
	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* main API
**********************************/

static bool zswap_store_page(struct page *page,
			     struct obj_cgroup *objcg,
			     struct zswap_pool *pool)
{
	swp_entry_t page_swpentry = page_swap_entry(page);
	struct zswap_entry *entry, *old;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		return false;
	}

	if (!zswap_compress(page, entry, pool))
		goto compress_failed;

	old = xa_store(swap_zswap_tree(page_swpentry),
		       swp_offset(page_swpentry),
		       entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	/*
	 * The entry is successfully compressed and stored in the tree, there is
	 * no further possibility of failure. Grab refs to the pool and objcg,
	 * charge zswap memory, and increment zswap_stored_pages.
	 * The opposite actions will be performed by zswap_entry_free()
	 * when the entry is removed from the tree.
	 */
	zswap_pool_get(pool);
	if (objcg) {
		obj_cgroup_get(objcg);
		obj_cgroup_charge_zswap(objcg, entry->length);
	}
	atomic_long_inc(&zswap_stored_pages);
	if (entry->length == PAGE_SIZE)
		atomic_long_inc(&zswap_stored_incompressible_pages);

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	entry->pool = pool;
	entry->swpentry = page_swpentry;
	entry->objcg = objcg;
	entry->referenced = true;
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	return true;

store_failed:
	zs_free(pool->zs_pool, entry->handle);
compress_failed:
	zswap_entry_cache_free(entry);
	return false;
}

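/*
 * Store every page of a (possibly large) swapcache folio into zswap.
 * Returns true only if all pages were stored; on any failure the entries
 * already created for this folio (and any stale ones from a previous store)
 * are invalidated so writeback cannot resurrect old data.
 */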
bool zswap_store(struct folio *folio)
{
	long nr_pages = folio_nr_pages(folio);
	swp_entry_t swp = folio->swap;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	struct zswap_pool *pool;
	bool ret = false;
	long index;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	if (!zswap_enabled)
		goto check_old;

	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto put_objcg;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto put_objcg;

	pool = zswap_pool_current_get();
	if (!pool)
		goto put_objcg;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	for (index = 0; index < nr_pages; ++index) {
		struct page *page = folio_page(folio, index);

		if (!zswap_store_page(page, objcg, pool))
			goto put_pool;
	}

	if (objcg)
		count_objcg_events(objcg, ZSWPOUT, nr_pages);

	count_vm_events(ZSWPOUT, nr_pages);

	ret = true;

put_pool:
	zswap_pool_put(pool);
put_objcg:
	obj_cgroup_put(objcg);
	if (!ret && zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate
	 * the possibly stale entries which were previously stored at the
	 * offsets corresponding to each page of the folio. Otherwise,
	 * writeback could overwrite the new data in the swapfile.
	 */
	if (!ret) {
		unsigned type = swp_type(swp);
		pgoff_t offset = swp_offset(swp);
		struct zswap_entry *entry;
		struct xarray *tree;

		for (index = 0; index < nr_pages; ++index) {
			tree = swap_zswap_tree(swp_entry(type, offset + index));
			entry = xa_erase(tree, offset + index);
			if (entry)
				zswap_entry_free(entry);
		}
	}

	return ret;
}

/**
 * zswap_load() - load a folio from zswap
 * @folio: folio to load
 *
 * Return: 0 on success, with the folio unlocked and marked up-to-date, or one
 * of the following error codes:
 *
 *  -EIO: if the swapped out content was in zswap, but could not be loaded
 *  into the page due to a decompression failure. The folio is unlocked, but
 *  NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page()
 *  will SIGBUS).
 *
 *  -EINVAL: if the swapped out content was in zswap, but the page belongs
 *  to a large folio, which is not supported by zswap. The folio is unlocked,
 *  but NOT marked up-to-date, so that an IO error is emitted (e.g.
 *  do_swap_page() will SIGBUS).
 *
 *  -ENOENT: if the swapped out content was not in zswap. The folio remains
 *  locked on return.
 */
int zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	if (zswap_never_enabled())
		return -ENOENT;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 */
	if (WARN_ON_ONCE(folio_test_large(folio))) {
		folio_unlock(folio);
		return -EINVAL;
	}

	entry = xa_load(tree, offset);
	if (!entry)
		return -ENOENT;

	if (!zswap_decompress(entry, folio)) {
		folio_unlock(folio);
		return -EIO;
	}

	folio_mark_uptodate(folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPIN, 1);

	/*
	 * We are reading into the swapcache, invalidate zswap entry.
	 * The swapcache is the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 */
	folio_mark_dirty(folio);
	xa_erase(tree, offset);
	zswap_entry_free(entry);

	folio_unlock(folio);
	return 0;
}

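/*
 * Called when a swap slot is freed: drop the compressed copy, if any, that
 * zswap still holds for that slot.
 */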
void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	if (xa_empty(tree))
		return;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

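/*
 * Called at swapon time: allocate one xarray per ZSWAP_ADDRESS_SPACE_PAGES
 * worth of swap slots so that tree lookups and updates do not all contend
 * on a single structure for large swap devices.
 */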
int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, ZSWAP_ADDRESS_SPACE_PAGES);
	trees = kvzalloc_objs(*tree, nr);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}


/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int debugfs_get_stored_pages(void *data, u64 *val)
{
	*val = atomic_long_read(&zswap_stored_pages);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");

static int debugfs_get_stored_incompressible_pages(void *data, u64 *val)
{
	*val = atomic_long_read(&zswap_stored_incompressible_pages);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(stored_incompressible_pages_fops,
			 debugfs_get_stored_incompressible_pages, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("decompress_fail", 0444,
			   zswap_debugfs_root, &zswap_decompress_fail);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_file("stored_pages", 0444,
			    zswap_debugfs_root, NULL, &stored_pages_fops);
	debugfs_create_file("stored_incompressible_pages", 0444,
			    zswap_debugfs_root, NULL,
			    &stored_incompressible_pages_fops);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      NULL);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s\n", pool->tfm_name);
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");