// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_long_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =		zswap_enabled_param_set,
	.get =		param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
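/*
 * Usage note (illustrative, not part of the original file): this and the
 * module parameters below use 0644 permissions, so they are also writable
 * at runtime through sysfs, e.g.:
 *
 *	echo 1    > /sys/module/zswap/parameters/enabled
 *	echo zstd > /sys/module/zswap/parameters/compressor
 *	echo 25   > /sys/module/zswap/parameters/max_pool_percent
 *
 * (compressor/zpool names must be available in the running kernel; the
 * setters below validate the new value and switch the current pool.)
 */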
/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
		 zpool_get_type((p)->zpool))

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}
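/*
 * Pool teardown path (summary for clarity): when a pool stops being the
 * current pool, its initial percpu ref is killed; once the refcount drops
 * to zero, __zswap_pool_empty() unlinks it from zswap_pools and queues
 * __zswap_pool_release(), which waits for an RCU grace period so that
 * concurrent readers of the pool list are done before the pool is freed.
 */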
static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

/* The caller must already have a reference. */
static void zswap_pool_get(struct zswap_pool *pool)
{
	percpu_ref_get(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_tryget(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_tryget(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}
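/*
 * Worked example (illustrative numbers, not taken from the code above):
 * on a machine with 4 GiB of RAM (1048576 4K pages) and the defaults
 * max_pool_percent=20 and accept_threshold_percent=90, zswap_max_pages()
 * is 209715 pages and zswap_accept_thr_pages() is 188743 pages.  Once the
 * compressed pool reaches the former, zswap_check_limits() keeps reporting
 * "full" (and new stores are rejected) until usage drops below the latter.
 */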
/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining:
	 *
	 * 1. list_lru_add() is called before list_lru_one is dead. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after list_lru_one is dead. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
	}
}
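/*
 * The nr_disk_swapins counter incremented above is consumed by
 * zswap_shrinker_count() in the shrinker section below, where recent disk
 * swapins reduce the number of objects reported as reclaimable - they are
 * taken as a signal that writeback has been too aggressive.
 */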
/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * to the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of the memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	zswap_lru_del(&zswap_list_lru, entry);
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_long_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

/* Prevent CPU hotplug from freeing up the per-CPU acomp_ctx resources */
static struct crypto_acomp_ctx *acomp_ctx_get_cpu(struct crypto_acomp_ctx __percpu *acomp_ctx)
{
	cpus_read_lock();
	return raw_cpu_ptr(acomp_ctx);
}

static void acomp_ctx_put_cpu(void)
{
	cpus_read_unlock();
}
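/*
 * Every (de)compression below is bracketed by acomp_ctx_get_cpu() /
 * acomp_ctx_put_cpu(): holding the CPU hotplug read lock keeps
 * zswap_cpu_comp_dead() from freeing the per-CPU buffer and request
 * underneath us, while the context's mutex serializes users of the same
 * per-CPU context.
 */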
static bool zswap_compress(struct page *page, struct zswap_entry *entry,
			   struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = acomp_ctx_get_cpu(pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be an over-compression
	 * case, and hardware accelerators may not check the dst buffer size,
	 * so give the dst buffer enough length to avoid a buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously, which makes the process
	 * synchronous in fact.
	 * Theoretically, acomp supports users sending multiple acomp requests
	 * in one acomp instance and then getting those requests done
	 * simultaneously.  But in this case zswap actually stores and loads
	 * page by page; there is no existing method to send the second page
	 * before the first page is done in one thread doing zswap.
	 * In different threads running on different cpus, however, we have
	 * different acomp instances, so multiple threads can do
	 * (de)compression in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = pool->zpool;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	acomp_ctx_put_cpu();
	return comp_ret == 0 && alloc_ret == 0;
}

static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = acomp_ctx_get_cpu(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
	 * resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
	 * In such cases, we also must copy the buffer to a temporary and lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
	acomp_ctx_put_cpu();
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPWB, 1);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e. lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the
 *    LRU in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the less gains we
 *    can expect from writeback. We scale down the number of objects available
 *    for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set, give it
	 * a second chance. Only clear the referenced bit and rotate it in the
	 * zswap's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(&l->lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
					  &encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_long_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that are recently swapped
	 * in from disk. The idea is that had we protected the zswap LRU by this
	 * amount of pages, these disk swapins would not have happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
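/*
 * Worked example for the scaling above (illustrative numbers): if 512
 * entries remain freeable on this LRU after the swapin adjustment, and the
 * cgroup stores 2048 pages of data in 512 pages of backing memory (a 4:1
 * compression ratio), the shrinker reports mult_frac(512, 512, 2048) = 128
 * reclaimable objects - only a quarter of the LRU is offered for writeback.
 */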

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* main API
**********************************/

static ssize_t zswap_store_page(struct page *page,
				struct obj_cgroup *objcg,
				struct zswap_pool *pool)
{
	swp_entry_t page_swpentry = page_swap_entry(page);
	struct zswap_entry *entry, *old;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		return -EINVAL;
	}

	if (!zswap_compress(page, entry, pool))
		goto compress_failed;

	old = xa_store(swap_zswap_tree(page_swpentry),
		       swp_offset(page_swpentry),
		       entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	/*
	 * The entry is successfully compressed and stored in the tree, there is
	 * no further possibility of failure. Grab refs to the pool and objcg.
	 * These refs will be dropped by zswap_entry_free() when the entry is
	 * removed from the tree.
	 */
	zswap_pool_get(pool);
	if (objcg)
		obj_cgroup_get(objcg);

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	entry->pool = pool;
	entry->swpentry = page_swpentry;
	entry->objcg = objcg;
	entry->referenced = true;
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	return entry->length;

store_failed:
	zpool_free(pool->zpool, entry->handle);
compress_failed:
	zswap_entry_cache_free(entry);
	return -EINVAL;
}

bool zswap_store(struct folio *folio)
{
	long nr_pages = folio_nr_pages(folio);
	swp_entry_t swp = folio->swap;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	struct zswap_pool *pool;
	size_t compressed_bytes = 0;
	bool ret = false;
	long index;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	if (!zswap_enabled)
		goto check_old;

	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto put_objcg;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto put_objcg;

	pool = zswap_pool_current_get();
	if (!pool)
		goto put_objcg;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	for (index = 0; index < nr_pages; ++index) {
		struct page *page = folio_page(folio, index);
		ssize_t bytes;

		bytes = zswap_store_page(page, objcg, pool);
		if (bytes < 0)
			goto put_pool;
		compressed_bytes += bytes;
	}

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, compressed_bytes);
		count_objcg_events(objcg, ZSWPOUT, nr_pages);
	}

	atomic_long_add(nr_pages, &zswap_stored_pages);
	count_vm_events(ZSWPOUT, nr_pages);

	ret = true;

put_pool:
	zswap_pool_put(pool);
put_objcg:
	obj_cgroup_put(objcg);
	if (!ret && zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate
	 * the possibly stale entries which were previously stored at the
	 * offsets corresponding to each page of the folio. Otherwise,
	 * writeback could overwrite the new data in the swapfile.
	 */
	if (!ret) {
		unsigned type = swp_type(swp);
		pgoff_t offset = swp_offset(swp);
		struct zswap_entry *entry;
		struct xarray *tree;

		for (index = 0; index < nr_pages; ++index) {
			tree = swap_zswap_tree(swp_entry(type, offset + index));
			entry = xa_erase(tree, offset + index);
			if (entry)
				zswap_entry_free(entry);
		}
	}

	return ret;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return false;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 *
	 * Return true without marking the folio uptodate so that an IO error is
	 * emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(folio_test_large(folio)))
		return true;

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPIN, 1);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	folio_mark_uptodate(folio);
	return true;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	if (xa_empty(tree))
		return;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int debugfs_get_stored_pages(void *data, u64 *val)
{
	*val = atomic_long_read(&zswap_stored_pages);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_file("stored_pages", 0444,
			    zswap_debugfs_root, NULL, &stored_pages_fops);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");