// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either is unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
	}
}

/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * of the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	zswap_lru_del(&zswap_list_lru, entry);
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wake up
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, and crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_folio(&input, folio, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be an over-compression
	 * case, and hardware accelerators may not check the dst buffer size,
	 * so give the dst buffer enough length to avoid buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly that we send an asynchronous request and
	 * then wait for its completion synchronously; in effect the operation
	 * is synchronous.
	 * In theory, acomp lets users submit multiple requests to one acomp
	 * instance and have them completed simultaneously. But in this case,
	 * zswap actually stores and loads page by page; there is no existing
	 * method for one thread doing zswap to send a second page before the
	 * first page is done.
	 * However, different threads running on different CPUs use different
	 * acomp instances, so multiple threads can do (de)compression in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = entry->pool->zpool;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}

static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle() is atomic, we cannot reliably use its mapped
	 * buffer for crypto_acomp_decompress(), which might sleep. In such
	 * cases, we must resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped
	 * buffer, such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one() is only equipped to handle linearly mapped
	 * low memory. In such cases, we also must copy the buffer to a
	 * temporary and lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
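 *
 * As a summary derived from the function body below: it returns 0 on
 * success, -EEXIST if the folio was already present in the swap cache
 * (the zswap entry is left in place), and -ENOMEM if the swap cache
 * folio cannot be allocated or the entry was invalidated and recycled
 * behind our back.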
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e. lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the
 *    LRU in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the less gains we
 *    can expect from writeback. We scale down the number of objects available
 *    for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set, give it
	 * a second chance. Only clear the referenced bit and rotate it in the
	 * zswap's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	spin_lock(lock);
	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that are recently swapped
	 * in from disk. The idea is that had we protected zswap's LRU by this
	 * number of pages, these disk swapins would not have happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

	entry->swpentry = swp;
	entry->objcg = objcg;
	entry->referenced = true;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	count_vm_event(ZSWPOUT);

	return true;

store_failed:
	zpool_free(entry->pool->zpool, entry->handle);
put_pool:
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	obj_cgroup_put(objcg);
	if (zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate the
	 * possibly stale entry which was previously stored at this offset.
	 * Otherwise, writeback could overwrite the new data in the swapfile.
	 */
	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
	return false;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return false;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 *
	 * Return true without marking the folio uptodate so that an IO error is
	 * emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(folio_test_large(folio)))
		return true;

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	folio_mark_uptodate(folio);
	return true;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");