// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Load or writeback failed due to decompression failure */
static u64 zswap_decompress_fail;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexed into the xarray
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
		 zpool_get_type((p)->zpool))

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret, cpu;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

/* The caller must already have a reference. */
static void zswap_pool_get(struct zswap_pool *pool)
{
	percpu_ref_get(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_tryget(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_tryget(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining:
	 *
	 * 1. list_lru_add() is called before list_lru_one is dead. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after list_lru_one is dead. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
	}
}

/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * of the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	zswap_lru_del(&zswap_list_lru, entry);
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_long_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp = NULL;
	struct acomp_req *req = NULL;
	u8 *buffer = NULL;
	int ret;

	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!buffer) {
		ret = -ENOMEM;
		goto fail;
	}

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto fail;
	}

	req = acomp_request_alloc(acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * Only hold the mutex after completing allocations, otherwise we may
	 * recurse into zswap through reclaim and attempt to hold the mutex
	 * again resulting in a deadlock.
	 */
	mutex_lock(&acomp_ctx->mutex);
	crypto_init_wait(&acomp_ctx->wait);

	/*
	 * If the backend of acomp is async zip, crypto_req_done() will wake up
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, and crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->buffer = buffer;
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);
	acomp_ctx->req = req;
	mutex_unlock(&acomp_ctx->mutex);
	return 0;

fail:
	if (acomp)
		crypto_free_acomp(acomp);
	kfree(buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	mutex_lock(&acomp_ctx->mutex);
	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		acomp_ctx->req = NULL;
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}
	mutex_unlock(&acomp_ctx->mutex);

	return 0;
}

static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;

	for (;;) {
		acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
		mutex_lock(&acomp_ctx->mutex);
		if (likely(acomp_ctx->req))
			return acomp_ctx;
		/*
		 * It is possible that we were migrated to a different CPU after
		 * getting the per-CPU ctx but before the mutex was acquired. If
		 * the old CPU got offlined, zswap_cpu_comp_dead() could have
		 * already freed ctx->req (among other things) and set it to
		 * NULL. Just try again on the new CPU that we ended up on.
		 */
		mutex_unlock(&acomp_ctx->mutex);
	}
}

static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
{
	mutex_unlock(&acomp_ctx->mutex);
}

static bool zswap_compress(struct page *page, struct zswap_entry *entry,
			   struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be an over-compression
	 * case, and hardware accelerators may not check the dst buffer size,
	 * so give the dst buffer enough length to avoid buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously; the process is in fact
	 * synchronous.
	 * Theoretically, acomp lets users submit multiple requests to one
	 * acomp instance and have them completed simultaneously. But in this
	 * case zswap stores and loads page by page: there is no existing way
	 * to send the second page before the first page is done within one
	 * thread doing zswap.
	 * However, different threads running on different CPUs use different
	 * acomp instances, so multiple threads can do (de)compression in
	 * parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = pool->zpool;
	gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	zpool_obj_write(zpool, handle, dst, dlen);
	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	acomp_ctx_put_unlock(acomp_ctx);
	return comp_ret == 0 && alloc_ret == 0;
}

static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	int decomp_ret, dlen;
	u8 *src, *obj;

	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
	obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffer);

	/*
	 * zpool_obj_read_begin() might return a kmap address of highmem when
	 * acomp_ctx->buffer is not used.  However, sg_init_one() does not
	 * handle highmem addresses, so copy the object to acomp_ctx->buffer.
	 */
	if (virt_addr_valid(obj)) {
		src = obj;
	} else {
		WARN_ON_ONCE(obj == acomp_ctx->buffer);
		memcpy(acomp_ctx->buffer, obj, entry->length);
		src = acomp_ctx->buffer;
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	zpool_obj_read_end(zpool, entry->handle, obj);
	acomp_ctx_put_unlock(acomp_ctx);

	if (!decomp_ret && dlen == PAGE_SIZE)
		return true;

	zswap_decompress_fail++;
	pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n",
			     swp_type(entry->swpentry),
			     swp_offset(entry->swpentry),
			     entry->pool->tfm_name, entry->length, dlen);
	return false;
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct swap_info_struct *si;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};
	int ret = 0;

	/* try to allocate swap cache folio */
	si = get_swap_device(swpentry);
	if (!si)
		return -EEXIST;

	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	put_swap_device(si);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_load(tree, offset)) {
		ret = -ENOMEM;
		goto out;
	}

	if (!zswap_decompress(entry, folio)) {
		ret = -EIO;
		goto out;
	}

	xa_erase(tree, offset);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPWB, 1);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);

out:
	if (ret && ret != -EEXIST) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
	}
	folio_put(folio);
	return ret;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e. lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the LRU
 *    in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the less gains we
 *    can expect from writeback. We scale down the number of objects available
 *    for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set, give it
	 * a second chance. Only clear the referenced bit and rotate it in the
	 * zswap's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP.
	 */
	spin_unlock(&l->lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_long_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that are recently swapped
	 * in from disk. The idea is that had we protected the zswap LRU by this
	 * number of pages, these disk swapins would not have happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save the iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* main API
**********************************/

static bool zswap_store_page(struct page *page,
			     struct obj_cgroup *objcg,
			     struct zswap_pool *pool)
{
	swp_entry_t page_swpentry = page_swap_entry(page);
	struct zswap_entry *entry, *old;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		return false;
	}

	if (!zswap_compress(page, entry, pool))
		goto compress_failed;

	old = xa_store(swap_zswap_tree(page_swpentry),
		       swp_offset(page_swpentry),
		       entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	/*
	 * The entry is successfully compressed and stored in the tree, there is
	 * no further possibility of failure. Grab refs to the pool and objcg,
	 * charge zswap memory, and increment zswap_stored_pages.
	 * The opposite actions will be performed by zswap_entry_free()
	 * when the entry is removed from the tree.
	 */
	zswap_pool_get(pool);
	if (objcg) {
		obj_cgroup_get(objcg);
		obj_cgroup_charge_zswap(objcg, entry->length);
	}
	atomic_long_inc(&zswap_stored_pages);

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	entry->pool = pool;
	entry->swpentry = page_swpentry;
	entry->objcg = objcg;
	entry->referenced = true;
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	return true;

store_failed:
	zpool_free(pool->zpool, entry->handle);
compress_failed:
	zswap_entry_cache_free(entry);
	return false;
}

bool zswap_store(struct folio *folio)
{
	long nr_pages = folio_nr_pages(folio);
	swp_entry_t swp = folio->swap;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	struct zswap_pool *pool;
	bool ret = false;
	long index;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	if (!zswap_enabled)
		goto check_old;

	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto put_objcg;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto put_objcg;

	pool = zswap_pool_current_get();
	if (!pool)
		goto put_objcg;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	for (index = 0; index < nr_pages; ++index) {
		struct page *page = folio_page(folio, index);

		if (!zswap_store_page(page, objcg, pool))
			goto put_pool;
	}

	if (objcg)
		count_objcg_events(objcg, ZSWPOUT, nr_pages);

	count_vm_events(ZSWPOUT, nr_pages);

	ret = true;

put_pool:
	zswap_pool_put(pool);
put_objcg:
	obj_cgroup_put(objcg);
	if (!ret && zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate
	 * the possibly stale entries which were previously stored at the
	 * offsets corresponding to each page of the folio. Otherwise,
	 * writeback could overwrite the new data in the swapfile.
	 */
	if (!ret) {
		unsigned type = swp_type(swp);
		pgoff_t offset = swp_offset(swp);
		struct zswap_entry *entry;
		struct xarray *tree;

		for (index = 0; index < nr_pages; ++index) {
			tree = swap_zswap_tree(swp_entry(type, offset + index));
			entry = xa_erase(tree, offset + index);
			if (entry)
				zswap_entry_free(entry);
		}
	}

	return ret;
}

/**
 * zswap_load() - load a folio from zswap
 * @folio: folio to load
 *
 * Return: 0 on success, with the folio unlocked and marked up-to-date, or one
 * of the following error codes:
 *
 *  -EIO: if the swapped out content was in zswap, but could not be loaded
 *  into the page due to a decompression failure. The folio is unlocked, but
 *  NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page()
 *  will SIGBUS).
 *
 *  -EINVAL: if the swapped out content was in zswap, but the page belongs
 *  to a large folio, which is not supported by zswap. The folio is unlocked,
 *  but NOT marked up-to-date, so that an IO error is emitted (e.g.
 *  do_swap_page() will SIGBUS).
 *
 *  -ENOENT: if the swapped out content was not in zswap. The folio remains
 *  locked on return.
 */
int zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return -ENOENT;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 */
	if (WARN_ON_ONCE(folio_test_large(folio))) {
		folio_unlock(folio);
		return -EINVAL;
	}

	entry = xa_load(tree, offset);
	if (!entry)
		return -ENOENT;

	if (!zswap_decompress(entry, folio)) {
		folio_unlock(folio);
		return -EIO;
	}

	folio_mark_uptodate(folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPIN, 1);

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache) {
		folio_mark_dirty(folio);
		xa_erase(tree, offset);
		zswap_entry_free(entry);
	}

	folio_unlock(folio);
	return 0;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	if (xa_empty(tree))
		return;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int debugfs_get_stored_pages(void *data, u64 *val)
{
	*val = atomic_long_read(&zswap_stored_pages);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("decompress_fail", 0444,
			   zswap_debugfs_root, &zswap_decompress_fail);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_file("stored_pages", 0444,
			    zswap_debugfs_root, NULL, &stored_pages_fops);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");