// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
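
/*
 * The three parameters above are runtime-tunable through sysfs. For
 * example (illustrative shell session; the target algorithm/zpool must
 * be available in the running kernel):
 *
 *	echo Y > /sys/module/zswap/parameters/enabled
 *	echo zstd > /sys/module/zswap/parameters/compressor
 *
 * Switching the compressor or zpool at runtime creates (or resurrects)
 * a pool for the new combination; the old pool is drained and destroyed
 * once its last reference is dropped (see __zswap_param_set() below).
 */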

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};
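
/*
 * For a sense of scale (illustrative; exact layout depends on the
 * architecture and config): on a 64-bit machine the fields above add up
 * to roughly 56 bytes before slab rounding, so each compressed page
 * costs one zswap_entry_cache object of metadata on top of the zpool
 * allocation that holds the compressed data itself.
 */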

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}
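
/*
 * Illustrative: with SWAP_ADDRESS_SPACE_SHIFT currently defined as 14,
 * each xarray covers 16384 consecutive swap slots (64 MiB of swap at
 * 4 KiB per page), so e.g. swap offset 100000 on swap type 0 lands in
 * zswap_trees[0][6]. Splitting the offset space this way spreads
 * xarray lock contention across multiple trees.
 */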

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
		 zpool_get_type((p)->zpool))

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret, cpu;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

/* The caller must already have a reference. */
static void zswap_pool_get(struct zswap_pool *pool)
{
	percpu_ref_get(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_tryget(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_tryget(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}
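
/*
 * Worked example of the hysteresis above (illustrative numbers): on a
 * machine with 4,000,000 RAM pages and the defaults of
 * max_pool_percent=20 and accept_threshold_percent=90, stores are
 * rejected once the pool reaches 800,000 pages and are accepted again
 * only after writeback shrinks it below 720,000 pages, avoiding
 * store/reject bouncing right at the limit.
 */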

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}
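
/*
 * Illustrative lifecycle of a runtime switch such as
 * "echo lz4 > /sys/module/zswap/parameters/compressor": a new (or
 * resurrected) pool is placed at the head of zswap_pools and becomes
 * the store target, while percpu_ref_kill() starts draining the old
 * pool. Loads still reach old entries through their entry->pool pointer
 * until writeback or invalidation empties that pool, at which point
 * __zswap_pool_empty() schedules its destruction.
 */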

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining:
	 *
	 * 1. list_lru_add() is called before list_lru_one is dead. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after list_lru_one is dead. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
	}
}

/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * to the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of the memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	zswap_lru_del(&zswap_list_lru, entry);
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_long_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp = NULL;
	struct acomp_req *req = NULL;
	u8 *buffer = NULL;
	int ret;

	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!buffer) {
		ret = -ENOMEM;
		goto fail;
	}

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto fail;
	}

	req = acomp_request_alloc(acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * Only hold the mutex after completing allocations, otherwise we may
	 * recurse into zswap through reclaim and attempt to hold the mutex
	 * again resulting in a deadlock.
	 */
	mutex_lock(&acomp_ctx->mutex);
	crypto_init_wait(&acomp_ctx->wait);

	/*
	 * If the backend of acomp is async zip, crypto_req_done() will wake up
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called and crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->buffer = buffer;
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);
	acomp_ctx->req = req;
	mutex_unlock(&acomp_ctx->mutex);
	return 0;

fail:
	if (acomp)
		crypto_free_acomp(acomp);
	kfree(buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	mutex_lock(&acomp_ctx->mutex);
	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		acomp_ctx->req = NULL;
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}
	mutex_unlock(&acomp_ctx->mutex);

	return 0;
}

static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;

	for (;;) {
		acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
		mutex_lock(&acomp_ctx->mutex);
		if (likely(acomp_ctx->req))
			return acomp_ctx;
		/*
		 * It is possible that we were migrated to a different CPU after
		 * getting the per-CPU ctx but before the mutex was acquired. If
		 * the old CPU got offlined, zswap_cpu_comp_dead() could have
		 * already freed ctx->req (among other things) and set it to
		 * NULL. Just try again on the new CPU that we ended up on.
		 */
		mutex_unlock(&acomp_ctx->mutex);
	}
}

static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
{
	mutex_unlock(&acomp_ctx->mutex);
}

static bool zswap_compress(struct page *page, struct zswap_entry *entry,
			   struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be an over-compression
	 * case, and hardware accelerators may not check the dst buffer size,
	 * so give the dst buffer enough length to avoid buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously; this makes the process
	 * synchronous in fact.
	 * Theoretically, acomp supports sending multiple requests to one acomp
	 * instance and having them completed simultaneously. But in this case
	 * zswap actually stores and loads page by page; there is no existing
	 * method to send the second page before the first page is done in one
	 * thread doing zswap.
	 * But in different threads running on different CPUs, we have different
	 * acomp instances, so multiple threads can do (de)compression in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = pool->zpool;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	acomp_ctx_put_unlock(acomp_ctx);
	return comp_ret == 0 && alloc_ret == 0;
}
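
/*
 * Illustrative accounting for zswap_compress() failures: -ENOSPC from
 * either the compressor or the allocator means the compressed result
 * was too large to be worth storing (reject_compress_poor); any other
 * compressor error counts as reject_compress_fail, and any other
 * allocator error as reject_alloc_fail. All three counters are exposed
 * under /sys/kernel/debug/zswap/ (see the debugfs section below).
 */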

static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
	 * resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
	 * In such cases, we also must copy the buffer to a temporary and lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
	acomp_ctx_put_unlock(acomp_ctx);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct swap_info_struct *si;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	si = get_swap_device(swpentry);
	if (!si)
		return -EEXIST;

	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	put_swap_device(si);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPWB, 1);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e. lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the
 *    LRU in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the smaller the
 *    gains we can expect from writeback. We scale down the number of objects
 *    available for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set, give it
	 * a second chance. Only clear the referenced bit and rotate it in the
	 * zswap's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP.
	 */
	spin_unlock(&l->lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_long_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that are recently swapped
	 * in from disk. The idea is that had we protected zswap's LRU by this
	 * number of pages, these disk swapins would not have happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
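
/*
 * Worked example for the scaling above (illustrative numbers): with
 * 1000 freeable entries, 100 of which are offset by recent disk
 * swapins, nr_freeable becomes 900. If the stored pages compressed
 * 4:1 (nr_backing = 250, nr_stored = 1000), the shrinker reports
 * mult_frac(900, 250, 1000) = 225 reclaimable objects, so a
 * well-compressing pool is shrunk more gently.
 */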

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}
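
/*
 * Illustrative: with nr_to_walk = 1, each shrink_memcg() call considers
 * at most one LRU entry per node, so on a two-node system a single call
 * writes back at most two pages. list_lru_walk_one() decrements
 * nr_to_walk for each entry it visits, hence "scanned += 1 - nr_to_walk"
 * above counts the nodes whose LRU was non-empty.
 */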

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select a cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save the iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* main API
**********************************/

static bool zswap_store_page(struct page *page,
			     struct obj_cgroup *objcg,
			     struct zswap_pool *pool)
{
	swp_entry_t page_swpentry = page_swap_entry(page);
	struct zswap_entry *entry, *old;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		return false;
	}

	if (!zswap_compress(page, entry, pool))
		goto compress_failed;

	old = xa_store(swap_zswap_tree(page_swpentry),
		       swp_offset(page_swpentry),
		       entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	/*
	 * The entry is successfully compressed and stored in the tree, there is
	 * no further possibility of failure. Grab refs to the pool and objcg,
	 * charge zswap memory, and increment zswap_stored_pages.
	 * The opposite actions will be performed by zswap_entry_free()
	 * when the entry is removed from the tree.
	 */
	zswap_pool_get(pool);
	if (objcg) {
		obj_cgroup_get(objcg);
		obj_cgroup_charge_zswap(objcg, entry->length);
	}
	atomic_long_inc(&zswap_stored_pages);

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	entry->pool = pool;
	entry->swpentry = page_swpentry;
	entry->objcg = objcg;
	entry->referenced = true;
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	return true;

store_failed:
	zpool_free(pool->zpool, entry->handle);
compress_failed:
	zswap_entry_cache_free(entry);
	return false;
}
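
/*
 * Illustrative: zswap_store() below feeds a large folio to
 * zswap_store_page() one page at a time, so e.g. a 2 MiB THP becomes
 * 512 independent entries. If any page fails to store, all of the
 * folio's entries are erased again on the check_old path, so stale
 * compressed data can never shadow the version written to disk.
 */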

bool zswap_store(struct folio *folio)
{
	long nr_pages = folio_nr_pages(folio);
	swp_entry_t swp = folio->swap;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	struct zswap_pool *pool;
	bool ret = false;
	long index;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	if (!zswap_enabled)
		goto check_old;

	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto put_objcg;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto put_objcg;

	pool = zswap_pool_current_get();
	if (!pool)
		goto put_objcg;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	for (index = 0; index < nr_pages; ++index) {
		struct page *page = folio_page(folio, index);

		if (!zswap_store_page(page, objcg, pool))
			goto put_pool;
	}

	if (objcg)
		count_objcg_events(objcg, ZSWPOUT, nr_pages);

	count_vm_events(ZSWPOUT, nr_pages);

	ret = true;

put_pool:
	zswap_pool_put(pool);
put_objcg:
	obj_cgroup_put(objcg);
	if (!ret && zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate
	 * the possibly stale entries which were previously stored at the
	 * offsets corresponding to each page of the folio. Otherwise,
	 * writeback could overwrite the new data in the swapfile.
	 */
	if (!ret) {
		unsigned type = swp_type(swp);
		pgoff_t offset = swp_offset(swp);
		struct zswap_entry *entry;
		struct xarray *tree;

		for (index = 0; index < nr_pages; ++index) {
			tree = swap_zswap_tree(swp_entry(type, offset + index));
			entry = xa_erase(tree, offset + index);
			if (entry)
				zswap_entry_free(entry);
		}
	}

	return ret;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return false;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 *
	 * Return true without marking the folio uptodate so that an IO error is
	 * emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(folio_test_large(folio)))
		return true;

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPIN, 1);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	folio_mark_uptodate(folio);
	return true;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	if (xa_empty(tree))
		return;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}
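
/*
 * Worked example for zswap_swapon() (illustrative, 4 KiB pages):
 * swapon of an 8 GiB device passes nr_pages = 2,097,152, so
 * DIV_ROUND_UP(2097152, 16384) allocates 128 xarrays, matching the
 * per-64 MiB tree granularity used by swap_zswap_tree() above.
 */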

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int debugfs_get_stored_pages(void *data, u64 *val)
{
	*val = atomic_long_read(&zswap_stored_pages);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_file("stored_pages", 0444,
			    zswap_debugfs_root, NULL, &stored_pages_fops);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif
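
/*
 * Illustrative readout of the files created above (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	grep . /sys/kernel/debug/zswap/*
 *
 * Comparing stored_pages * PAGE_SIZE against pool_total_size gives the
 * effective compression ratio, while the reject_* counters explain why
 * stores were refused.
 */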

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");