// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <crypto/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>
#include <linux/zsmalloc.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of pages currently stored in zswap */
atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);
/* The number of incompressible pages currently stored in zswap */
static atomic_long_t zswap_stored_incompressible_pages = ATOMIC_LONG_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
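 *
 * (Note: these event counters are exported read-only under
 * /sys/kernel/debug/zswap/, see zswap_debugfs_init() at the end of this
 * file.)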
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Load or writeback failed due to decompression failure */
static u64 zswap_decompress_fail;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

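/*
 * Illustrative usage of the tunables above (values are examples only; the
 * compressor name must be an algorithm known to the crypto acomp API):
 *
 *	echo Y    > /sys/module/zswap/parameters/enabled
 *	echo zstd > /sys/module/zswap/parameters/compressor
 *	echo 20   > /sys/module/zswap/parameters/max_pool_percent
 *	echo 90   > /sys/module/zswap/parameters/accept_threshold_percent
 *	echo Y    > /sys/module/zswap/parameters/shrinker_enabled
 */
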
/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zs_pool *zs_pool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the xarray
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zsmalloc allocation handle that stores the compressed page data
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zsmalloc */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

/* One swap address space for each 64M swap space */
#define ZSWAP_ADDRESS_SPACE_SHIFT	14
#define ZSWAP_ADDRESS_SPACE_PAGES	(1 << ZSWAP_ADDRESS_SPACE_SHIFT)
static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> ZSWAP_ADDRESS_SPACE_SHIFT];
}
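
/*
 * Illustrative arithmetic: with ZSWAP_ADDRESS_SPACE_SHIFT == 14, each xarray
 * above covers 1 << 14 = 16384 swap slots, i.e. 64MiB of swap space with
 * 4KiB pages, matching the "64M swap space" comment above.
 */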

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s\n", msg, (p)->tfm_name)

/*********************************
* pool functions
**********************************/
static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	int ret, cpu;

	if (!zswap_has_pool && !strcmp(compressor, ZSWAP_PARAM_UNSET))
		return NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zs_pool = zs_create_pool(name);
	if (!pool->zs_pool)
		goto error;

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zs_pool)
		zs_destroy_pool(pool->zs_pool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_acomp(zswap_compressor, 0, 0) &&
	    strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
	}

	/* Default compressor should be available. Kconfig bug? */
	if (WARN_ON_ONCE(!crypto_has_acomp(zswap_compressor, 0, 0))) {
		zswap_compressor = ZSWAP_PARAM_UNSET;
		return NULL;
	}

	return zswap_pool_create(zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zs_destroy_pool(pool->zs_pool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_tryget(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

/*
 * The caller must already have a reference.
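 * New references must instead be taken with zswap_pool_tryget(), which can
 * fail once the pool is being destroyed.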
 */
static void zswap_pool_get(struct zswap_pool *pool)
{
	percpu_ref_get(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_tryget(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_tryget(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zs_get_total_pages(pool->zs_pool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}

/*********************************
* param callbacks
**********************************/

static int zswap_compressor_param_set(const char *val, const struct kernel_param *kp)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	bool create_pool = false;
	int ret = 0;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* Handled in zswap_setup() */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool || strcmp(s, *(char **)kp->arg))
			create_pool = true;
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	if (!create_pool)
		return ret;

	if (!crypto_has_acomp(s, 0, 0)) {
		pr_err("compressor %s not available\n", s);
		return -ENOENT;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(s);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(s);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/*
		 * Add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	/*
	 * Drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining:
	 *
	 * 1. list_lru_add() is called before list_lru_one is dead. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after list_lru_one is dead. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
	}
}

/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * of the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zsmalloc allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
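 *
 * (Note: callers are expected to have already made the entry unreachable,
 * e.g. by erasing or replacing it in its xarray, before freeing it here.)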
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	zswap_lru_del(&zswap_list_lru, entry);
	zs_free(entry->pool->zs_pool, entry->handle);
	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	if (entry->length == PAGE_SIZE)
		atomic_long_dec(&zswap_stored_incompressible_pages);
	zswap_entry_cache_free(entry);
	atomic_long_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp = NULL;
	struct acomp_req *req = NULL;
	u8 *buffer = NULL;
	int ret;

	buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
	if (!buffer) {
		ret = -ENOMEM;
		goto fail;
	}

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %pe\n",
		       pool->tfm_name, acomp);
		ret = PTR_ERR(acomp);
		goto fail;
	}

	req = acomp_request_alloc(acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * Only hold the mutex after completing allocations, otherwise we may
	 * recurse into zswap through reclaim and attempt to hold the mutex
	 * again resulting in a deadlock.
	 */
	mutex_lock(&acomp_ctx->mutex);
	crypto_init_wait(&acomp_ctx->wait);

	/*
	 * If the acomp backend is asynchronous, crypto_req_done() will wake up
	 * crypto_wait_req(); if the backend is a synchronous scomp, the callback
	 * won't be called and crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	acomp_ctx->buffer = buffer;
	acomp_ctx->acomp = acomp;
	acomp_ctx->req = req;
	mutex_unlock(&acomp_ctx->mutex);
	return 0;

fail:
	if (!IS_ERR_OR_NULL(acomp))
		crypto_free_acomp(acomp);
	kfree(buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct acomp_req *req;
	struct crypto_acomp *acomp;
	u8 *buffer;

	if (IS_ERR_OR_NULL(acomp_ctx))
		return 0;

	mutex_lock(&acomp_ctx->mutex);
	req = acomp_ctx->req;
	acomp = acomp_ctx->acomp;
	buffer = acomp_ctx->buffer;
	acomp_ctx->req = NULL;
	acomp_ctx->acomp = NULL;
	acomp_ctx->buffer = NULL;
	mutex_unlock(&acomp_ctx->mutex);

	/*
	 * Do the actual freeing after releasing the mutex to avoid subtle
	 * locking dependencies causing deadlocks.
	 */
	if (!IS_ERR_OR_NULL(req))
		acomp_request_free(req);
	if (!IS_ERR_OR_NULL(acomp))
		crypto_free_acomp(acomp);
	kfree(buffer);

	return 0;
}

static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;

	for (;;) {
		acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
		mutex_lock(&acomp_ctx->mutex);
		if (likely(acomp_ctx->req))
			return acomp_ctx;
		/*
		 * It is possible that we were migrated to a different CPU after
		 * getting the per-CPU ctx but before the mutex was acquired. If
		 * the old CPU got offlined, zswap_cpu_comp_dead() could have
		 * already freed ctx->req (among other things) and set it to
		 * NULL. Just try again on the new CPU that we ended up on.
		 */
		mutex_unlock(&acomp_ctx->mutex);
	}
}

static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
{
	mutex_unlock(&acomp_ctx->mutex);
}

static bool zswap_compress(struct page *page, struct zswap_entry *entry,
			   struct zswap_pool *pool)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	gfp_t gfp;
	u8 *dst;
	bool mapped = false;

	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_page(&input, page, PAGE_SIZE, 0);

	sg_init_one(&output, dst, PAGE_SIZE);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little silly to submit an asynchronous request and then
	 * wait for its completion synchronously, which makes the operation
	 * effectively synchronous.
	 * In theory, acomp allows users to submit multiple requests on one acomp
	 * instance and have them completed simultaneously. But zswap stores and
	 * loads pages one at a time, so a single thread has no way to submit a
	 * second page before the first one is done.
	 * Different threads running on different CPUs use different acomp
	 * instances, however, so multiple threads can still (de)compress in
	 * parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	/*
	 * If a page cannot be compressed into a size smaller than PAGE_SIZE,
	 * save the content as-is without compression, to keep the LRU order
	 * of writebacks. If writeback is disabled, reject the page since it
	 * only adds metadata overhead. swap_writeout() will put the page back
	 * on the active LRU list in that case.
	 */
	if (comp_ret || !dlen || dlen >= PAGE_SIZE) {
		if (!mem_cgroup_zswap_writeback_enabled(
					folio_memcg(page_folio(page)))) {
			comp_ret = comp_ret ? comp_ret : -EINVAL;
			goto unlock;
		}
		comp_ret = 0;
		dlen = PAGE_SIZE;
		dst = kmap_local_page(page);
		mapped = true;
	}

	gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
	handle = zs_malloc(pool->zs_pool, dlen, gfp, page_to_nid(page));
	if (IS_ERR_VALUE(handle)) {
		alloc_ret = PTR_ERR((void *)handle);
		goto unlock;
	}

	zs_obj_write(pool->zs_pool, handle, dst, dlen);
	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (mapped)
		kunmap_local(dst);
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	acomp_ctx_put_unlock(acomp_ctx);
	return comp_ret == 0 && alloc_ret == 0;
}

static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zswap_pool *pool = entry->pool;
	struct scatterlist input[2]; /* zsmalloc returns an SG list with 1-2 entries */
	struct scatterlist output;
	struct crypto_acomp_ctx *acomp_ctx;
	int ret = 0, dlen;

	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
	zs_obj_read_sg_begin(pool->zs_pool, entry->handle, input, entry->length);

	/* zswap entries of length PAGE_SIZE are not compressed. */
	if (entry->length == PAGE_SIZE) {
		u8 *dst = kmap_local_folio(folio, 0);

		WARN_ON_ONCE(input->length != PAGE_SIZE);
		memcpy_from_sglist(dst, input, 0, PAGE_SIZE);
		kunmap_local(dst);
		dlen = PAGE_SIZE;
	} else {
		sg_init_table(&output, 1);
		sg_set_folio(&output, folio, PAGE_SIZE, 0);
		acomp_request_set_params(acomp_ctx->req, input, &output,
					 entry->length, PAGE_SIZE);
		ret = crypto_acomp_decompress(acomp_ctx->req);
		ret = crypto_wait_req(ret, &acomp_ctx->wait);
		dlen = acomp_ctx->req->dlen;
	}

	zs_obj_read_sg_end(pool->zs_pool, entry->handle);
	acomp_ctx_put_unlock(acomp_ctx);

	if (!ret && dlen == PAGE_SIZE)
		return true;

	zswap_decompress_fail++;
	pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n",
			     swp_type(entry->swpentry),
			     swp_offset(entry->swpentry),
			     entry->pool->tfm_name,
			     entry->length, dlen);
	return false;
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place. After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
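 *
 * (Summary of return values, as implemented below: 0 on success; -EEXIST if
 * the swap slot raced with swapin, swapoff, or a concurrent shrinker; -ENOMEM
 * if the swapcache folio could not be allocated or the zswap entry was
 * invalidated meanwhile; -EIO if decompression failed.)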
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct swap_info_struct *si;
	int ret = 0;

	/* try to allocate swap cache folio */
	si = get_swap_device(swpentry);
	if (!si)
		return -EEXIST;

	mpol = get_task_policy(current);
	folio = swap_cache_alloc_folio(swpentry, GFP_KERNEL, mpol,
				       NO_INTERLEAVE_INDEX, &folio_was_allocated);
	put_swap_device(si);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_load(tree, offset)) {
		ret = -ENOMEM;
		goto out;
	}

	if (!zswap_decompress(entry, folio)) {
		ret = -EIO;
		goto out;
	}

	xa_erase(tree, offset);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPWB, 1);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, NULL);

out:
	if (ret && ret != -EEXIST) {
		swap_cache_del_folio(folio);
		folio_unlock(folio);
	}
	folio_put(folio);
	return ret;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e. lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the LRU
 *    in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the less gains we
 *    can expect from writeback. We scale down the number of objects available
 *    for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set, give it
	 * a second chance. Only clear the referenced bit and rotate it in the
	 * zswap's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP.
	 */
	spin_unlock(&l->lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
					  &encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_long_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that are recently swapped
	 * in from disk. The idea is that had we protected the zswap LRU by this
	 * amount of pages, these disk swapins would not have happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
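	 *
	 * Illustrative example: with nr_freeable == 1000, nr_stored == 1000
	 * and nr_backing == 250 (a 4:1 compression ratio), the count reported
	 * below is 1000 * 250 / 1000 = 250 freeable objects.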
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* main API
**********************************/

static bool zswap_store_page(struct page *page,
			     struct obj_cgroup *objcg,
			     struct zswap_pool *pool)
{
	swp_entry_t page_swpentry = page_swap_entry(page);
	struct zswap_entry *entry, *old;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		return false;
	}

	if (!zswap_compress(page, entry, pool))
		goto compress_failed;

	old = xa_store(swap_zswap_tree(page_swpentry),
		       swp_offset(page_swpentry),
		       entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	/*
	 * The entry is successfully compressed and stored in the tree, there is
	 * no further possibility of failure. Grab refs to the pool and objcg,
	 * charge zswap memory, and increment zswap_stored_pages.
	 * The opposite actions will be performed by zswap_entry_free()
	 * when the entry is removed from the tree.
	 */
	zswap_pool_get(pool);
	if (objcg) {
		obj_cgroup_get(objcg);
		obj_cgroup_charge_zswap(objcg, entry->length);
	}
	atomic_long_inc(&zswap_stored_pages);
	if (entry->length == PAGE_SIZE)
		atomic_long_inc(&zswap_stored_incompressible_pages);

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	entry->pool = pool;
	entry->swpentry = page_swpentry;
	entry->objcg = objcg;
	entry->referenced = true;
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	return true;

store_failed:
	zs_free(pool->zs_pool, entry->handle);
compress_failed:
	zswap_entry_cache_free(entry);
	return false;
}

bool zswap_store(struct folio *folio)
{
	long nr_pages = folio_nr_pages(folio);
	swp_entry_t swp = folio->swap;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	struct zswap_pool *pool;
	bool ret = false;
	long index;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	if (!zswap_enabled)
		goto check_old;

	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto put_objcg;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto put_objcg;

	pool = zswap_pool_current_get();
	if (!pool)
		goto put_objcg;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	for (index = 0; index < nr_pages; ++index) {
		struct page *page = folio_page(folio, index);

		if (!zswap_store_page(page, objcg, pool))
			goto put_pool;
	}

	if (objcg)
		count_objcg_events(objcg, ZSWPOUT, nr_pages);

	count_vm_events(ZSWPOUT, nr_pages);

	ret = true;

put_pool:
	zswap_pool_put(pool);
put_objcg:
	obj_cgroup_put(objcg);
	if (!ret && zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate
	 * the possibly stale entries which were previously stored at the
	 * offsets corresponding to each page of the folio. Otherwise,
	 * writeback could overwrite the new data in the swapfile.
	 */
	if (!ret) {
		unsigned type = swp_type(swp);
		pgoff_t offset = swp_offset(swp);
		struct zswap_entry *entry;
		struct xarray *tree;

		for (index = 0; index < nr_pages; ++index) {
			tree = swap_zswap_tree(swp_entry(type, offset + index));
			entry = xa_erase(tree, offset + index);
			if (entry)
				zswap_entry_free(entry);
		}
	}

	return ret;
}

/**
 * zswap_load() - load a folio from zswap
 * @folio: folio to load
 *
 * Return: 0 on success, with the folio unlocked and marked up-to-date, or one
 * of the following error codes:
 *
 *  -EIO: if the swapped out content was in zswap, but could not be loaded
 *  into the page due to a decompression failure. The folio is unlocked, but
 *  NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page()
 *  will SIGBUS).
 *
 *  -EINVAL: if the swapped out content was in zswap, but the page belongs
 *  to a large folio, which is not supported by zswap. The folio is unlocked,
 *  but NOT marked up-to-date, so that an IO error is emitted (e.g.
 *  do_swap_page() will SIGBUS).
 *
 *  -ENOENT: if the swapped out content was not in zswap. The folio remains
 *  locked on return.
 */
int zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return -ENOENT;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 */
	if (WARN_ON_ONCE(folio_test_large(folio))) {
		folio_unlock(folio);
		return -EINVAL;
	}

	entry = xa_load(tree, offset);
	if (!entry)
		return -ENOENT;

	if (!zswap_decompress(entry, folio)) {
		folio_unlock(folio);
		return -EIO;
	}

	folio_mark_uptodate(folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_events(entry->objcg, ZSWPIN, 1);

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache) {
		folio_mark_dirty(folio);
		xa_erase(tree, offset);
		zswap_entry_free(entry);
	}

	folio_unlock(folio);
	return 0;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	if (xa_empty(tree))
		return;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, ZSWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int debugfs_get_stored_pages(void *data, u64 *val)
{
	*val = atomic_long_read(&zswap_stored_pages);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");

static int debugfs_get_stored_incompressible_pages(void *data, u64 *val)
{
	*val = atomic_long_read(&zswap_stored_incompressible_pages);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(stored_incompressible_pages_fops,
			 debugfs_get_stored_incompressible_pages, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("decompress_fail", 0444,
			   zswap_debugfs_root, &zswap_decompress_fail);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_file("stored_pages", 0444,
			    zswap_debugfs_root, NULL, &stored_pages_fops);
	debugfs_create_file("stored_incompressible_pages", 0444,
			    zswap_debugfs_root, NULL,
			    &stored_incompressible_pages_fops);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s\n", pool->tfm_name);
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");