1c942fddfSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 22b281117SSeth Jennings /* 32b281117SSeth Jennings * zswap.c - zswap driver file 42b281117SSeth Jennings * 52b281117SSeth Jennings * zswap is a backend for frontswap that takes pages that are in the process 62b281117SSeth Jennings * of being swapped out and attempts to compress and store them in a 72b281117SSeth Jennings * RAM-based memory pool. This can result in a significant I/O reduction on 82b281117SSeth Jennings * the swap device and, in the case where decompressing from RAM is faster 92b281117SSeth Jennings * than reading from the swap device, can also improve workload performance. 102b281117SSeth Jennings * 112b281117SSeth Jennings * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com> 122b281117SSeth Jennings */ 132b281117SSeth Jennings 142b281117SSeth Jennings #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 152b281117SSeth Jennings 162b281117SSeth Jennings #include <linux/module.h> 172b281117SSeth Jennings #include <linux/cpu.h> 182b281117SSeth Jennings #include <linux/highmem.h> 192b281117SSeth Jennings #include <linux/slab.h> 202b281117SSeth Jennings #include <linux/spinlock.h> 212b281117SSeth Jennings #include <linux/types.h> 222b281117SSeth Jennings #include <linux/atomic.h> 232b281117SSeth Jennings #include <linux/frontswap.h> 242b281117SSeth Jennings #include <linux/rbtree.h> 252b281117SSeth Jennings #include <linux/swap.h> 262b281117SSeth Jennings #include <linux/crypto.h> 271ec3b5feSBarry Song #include <linux/scatterlist.h> 282b281117SSeth Jennings #include <linux/mempool.h> 2912d79d64SDan Streetman #include <linux/zpool.h> 301ec3b5feSBarry Song #include <crypto/acompress.h> 312b281117SSeth Jennings 322b281117SSeth Jennings #include <linux/mm_types.h> 332b281117SSeth Jennings #include <linux/page-flags.h> 342b281117SSeth Jennings #include <linux/swapops.h> 352b281117SSeth Jennings #include <linux/writeback.h> 362b281117SSeth Jennings #include <linux/pagemap.h> 
3745190f01SVitaly Wool #include <linux/workqueue.h> 382b281117SSeth Jennings 39014bb1deSNeilBrown #include "swap.h" 40014bb1deSNeilBrown 412b281117SSeth Jennings /********************************* 422b281117SSeth Jennings * statistics 432b281117SSeth Jennings **********************************/ 4412d79d64SDan Streetman /* Total bytes used by the compressed storage */ 45f6498b77SJohannes Weiner u64 zswap_pool_total_size; 462b281117SSeth Jennings /* The number of compressed pages currently stored in zswap */ 47f6498b77SJohannes Weiner atomic_t zswap_stored_pages = ATOMIC_INIT(0); 48a85f878bSSrividya Desireddy /* The number of same-value filled pages currently stored in zswap */ 49a85f878bSSrividya Desireddy static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0); 502b281117SSeth Jennings 512b281117SSeth Jennings /* 522b281117SSeth Jennings * The statistics below are not protected from concurrent access for 532b281117SSeth Jennings * performance reasons so they may not be a 100% accurate. However, 542b281117SSeth Jennings * they do provide useful information on roughly how many times a 552b281117SSeth Jennings * certain event is occurring. 
562b281117SSeth Jennings */ 572b281117SSeth Jennings 582b281117SSeth Jennings /* Pool limit was hit (see zswap_max_pool_percent) */ 592b281117SSeth Jennings static u64 zswap_pool_limit_hit; 602b281117SSeth Jennings /* Pages written back when pool limit was reached */ 612b281117SSeth Jennings static u64 zswap_written_back_pages; 622b281117SSeth Jennings /* Store failed due to a reclaim failure after pool limit was reached */ 632b281117SSeth Jennings static u64 zswap_reject_reclaim_fail; 642b281117SSeth Jennings /* Compressed page was too big for the allocator to (optimally) store */ 652b281117SSeth Jennings static u64 zswap_reject_compress_poor; 662b281117SSeth Jennings /* Store failed because underlying allocator could not get memory */ 672b281117SSeth Jennings static u64 zswap_reject_alloc_fail; 682b281117SSeth Jennings /* Store failed because the entry metadata could not be allocated (rare) */ 692b281117SSeth Jennings static u64 zswap_reject_kmemcache_fail; 702b281117SSeth Jennings /* Duplicate store was encountered (rare) */ 712b281117SSeth Jennings static u64 zswap_duplicate_entry; 722b281117SSeth Jennings 7345190f01SVitaly Wool /* Shrinker work queue */ 7445190f01SVitaly Wool static struct workqueue_struct *shrink_wq; 7545190f01SVitaly Wool /* Pool limit was hit, we need to calm down */ 7645190f01SVitaly Wool static bool zswap_pool_reached_full; 7745190f01SVitaly Wool 782b281117SSeth Jennings /********************************* 792b281117SSeth Jennings * tunables 802b281117SSeth Jennings **********************************/ 81c00ed16aSDan Streetman 82bae21db8SDan Streetman #define ZSWAP_PARAM_UNSET "" 83bae21db8SDan Streetman 84*141fdeecSLiu Shixin static int zswap_setup(void); 85*141fdeecSLiu Shixin 86bb8b93b5SMaciej S. Szmigiero /* Enable/disable zswap */ 87bb8b93b5SMaciej S. 
Szmigiero static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON); 88d7b028f5SDan Streetman static int zswap_enabled_param_set(const char *, 89d7b028f5SDan Streetman const struct kernel_param *); 9083aed6cdSJoe Perches static const struct kernel_param_ops zswap_enabled_param_ops = { 91d7b028f5SDan Streetman .set = zswap_enabled_param_set, 92d7b028f5SDan Streetman .get = param_get_bool, 93d7b028f5SDan Streetman }; 94d7b028f5SDan Streetman module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644); 952b281117SSeth Jennings 9690b0fc26SDan Streetman /* Crypto compressor to use */ 97bb8b93b5SMaciej S. Szmigiero static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT; 9890b0fc26SDan Streetman static int zswap_compressor_param_set(const char *, 9990b0fc26SDan Streetman const struct kernel_param *); 10083aed6cdSJoe Perches static const struct kernel_param_ops zswap_compressor_param_ops = { 10190b0fc26SDan Streetman .set = zswap_compressor_param_set, 102c99b42c3SDan Streetman .get = param_get_charp, 103c99b42c3SDan Streetman .free = param_free_charp, 10490b0fc26SDan Streetman }; 10590b0fc26SDan Streetman module_param_cb(compressor, &zswap_compressor_param_ops, 106c99b42c3SDan Streetman &zswap_compressor, 0644); 10790b0fc26SDan Streetman 10890b0fc26SDan Streetman /* Compressed storage zpool to use */ 109bb8b93b5SMaciej S. 
Szmigiero static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT; 11090b0fc26SDan Streetman static int zswap_zpool_param_set(const char *, const struct kernel_param *); 11183aed6cdSJoe Perches static const struct kernel_param_ops zswap_zpool_param_ops = { 11290b0fc26SDan Streetman .set = zswap_zpool_param_set, 113c99b42c3SDan Streetman .get = param_get_charp, 114c99b42c3SDan Streetman .free = param_free_charp, 11590b0fc26SDan Streetman }; 116c99b42c3SDan Streetman module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644); 1172b281117SSeth Jennings 1182b281117SSeth Jennings /* The maximum percentage of memory that the compressed pool can occupy */ 1192b281117SSeth Jennings static unsigned int zswap_max_pool_percent = 20; 12090b0fc26SDan Streetman module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644); 12160105e12SMinchan Kim 12245190f01SVitaly Wool /* The threshold for accepting new pages after the max_pool_percent was hit */ 12345190f01SVitaly Wool static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */ 12445190f01SVitaly Wool module_param_named(accept_threshold_percent, zswap_accept_thr_percent, 12545190f01SVitaly Wool uint, 0644); 12645190f01SVitaly Wool 127cb325dddSMaciej S. Szmigiero /* 128cb325dddSMaciej S. Szmigiero * Enable/disable handling same-value filled pages (enabled by default). 129cb325dddSMaciej S. Szmigiero * If disabled every page is considered non-same-value filled. 130cb325dddSMaciej S. Szmigiero */ 131a85f878bSSrividya Desireddy static bool zswap_same_filled_pages_enabled = true; 132a85f878bSSrividya Desireddy module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled, 133a85f878bSSrividya Desireddy bool, 0644); 134a85f878bSSrividya Desireddy 135cb325dddSMaciej S. Szmigiero /* Enable/disable handling non-same-value filled pages (enabled by default) */ 136cb325dddSMaciej S. Szmigiero static bool zswap_non_same_filled_pages_enabled = true; 137cb325dddSMaciej S. 
Szmigiero module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled, 138cb325dddSMaciej S. Szmigiero bool, 0644); 139cb325dddSMaciej S. Szmigiero 1402b281117SSeth Jennings /********************************* 1412b281117SSeth Jennings * data structures 1422b281117SSeth Jennings **********************************/ 143f1c54846SDan Streetman 1441ec3b5feSBarry Song struct crypto_acomp_ctx { 1451ec3b5feSBarry Song struct crypto_acomp *acomp; 1461ec3b5feSBarry Song struct acomp_req *req; 1471ec3b5feSBarry Song struct crypto_wait wait; 1481ec3b5feSBarry Song u8 *dstmem; 1491ec3b5feSBarry Song struct mutex *mutex; 1501ec3b5feSBarry Song }; 1511ec3b5feSBarry Song 152f1c54846SDan Streetman struct zswap_pool { 153f1c54846SDan Streetman struct zpool *zpool; 1541ec3b5feSBarry Song struct crypto_acomp_ctx __percpu *acomp_ctx; 155f1c54846SDan Streetman struct kref kref; 156f1c54846SDan Streetman struct list_head list; 15745190f01SVitaly Wool struct work_struct release_work; 15845190f01SVitaly Wool struct work_struct shrink_work; 159cab7a7e5SSebastian Andrzej Siewior struct hlist_node node; 160f1c54846SDan Streetman char tfm_name[CRYPTO_MAX_ALG_NAME]; 161f1c54846SDan Streetman }; 162f1c54846SDan Streetman 1632b281117SSeth Jennings /* 1642b281117SSeth Jennings * struct zswap_entry 1652b281117SSeth Jennings * 1662b281117SSeth Jennings * This structure contains the metadata for tracking a single compressed 1672b281117SSeth Jennings * page within zswap. 1682b281117SSeth Jennings * 1692b281117SSeth Jennings * rbnode - links the entry into red-black tree for the appropriate swap type 170f1c54846SDan Streetman * offset - the swap offset for the entry. Index into the red-black tree. 1712b281117SSeth Jennings * refcount - the number of outstanding reference to the entry. This is needed 1722b281117SSeth Jennings * to protect against premature freeing of the entry by code 1736b452516SSeongJae Park * concurrent calls to load, invalidate, and writeback. 
The lock 1742b281117SSeth Jennings * for the zswap_tree structure that contains the entry must 1752b281117SSeth Jennings * be held while changing the refcount. Since the lock must 1762b281117SSeth Jennings * be held, there is no reason to also make refcount atomic. 1772b281117SSeth Jennings * length - the length in bytes of the compressed page data. Needed during 178a85f878bSSrividya Desireddy * decompression. For a same value filled page length is 0. 179f1c54846SDan Streetman * pool - the zswap_pool the entry's data is in 180f1c54846SDan Streetman * handle - zpool allocation handle that stores the compressed page data 181a85f878bSSrividya Desireddy * value - value of the same-value filled pages which have same content 1822b281117SSeth Jennings */ 1832b281117SSeth Jennings struct zswap_entry { 1842b281117SSeth Jennings struct rb_node rbnode; 1852b281117SSeth Jennings pgoff_t offset; 1862b281117SSeth Jennings int refcount; 1872b281117SSeth Jennings unsigned int length; 188f1c54846SDan Streetman struct zswap_pool *pool; 189a85f878bSSrividya Desireddy union { 1902b281117SSeth Jennings unsigned long handle; 191a85f878bSSrividya Desireddy unsigned long value; 192a85f878bSSrividya Desireddy }; 193f4840ccfSJohannes Weiner struct obj_cgroup *objcg; 1942b281117SSeth Jennings }; 1952b281117SSeth Jennings 1962b281117SSeth Jennings struct zswap_header { 1972b281117SSeth Jennings swp_entry_t swpentry; 1982b281117SSeth Jennings }; 1992b281117SSeth Jennings 2002b281117SSeth Jennings /* 2012b281117SSeth Jennings * The tree lock in the zswap_tree struct protects a few things: 2022b281117SSeth Jennings * - the rbtree 2032b281117SSeth Jennings * - the refcount field of each entry in the tree 2042b281117SSeth Jennings */ 2052b281117SSeth Jennings struct zswap_tree { 2062b281117SSeth Jennings struct rb_root rbroot; 2072b281117SSeth Jennings spinlock_t lock; 2082b281117SSeth Jennings }; 2092b281117SSeth Jennings 2102b281117SSeth Jennings static struct zswap_tree 
*zswap_trees[MAX_SWAPFILES]; 2112b281117SSeth Jennings 212f1c54846SDan Streetman /* RCU-protected iteration */ 213f1c54846SDan Streetman static LIST_HEAD(zswap_pools); 214f1c54846SDan Streetman /* protects zswap_pools list modification */ 215f1c54846SDan Streetman static DEFINE_SPINLOCK(zswap_pools_lock); 21632a4e169SDan Streetman /* pool counter to provide unique names to zpool */ 21732a4e169SDan Streetman static atomic_t zswap_pools_count = ATOMIC_INIT(0); 218f1c54846SDan Streetman 2199021ccecSLiu Shixin enum zswap_init_type { 2209021ccecSLiu Shixin ZSWAP_UNINIT, 2219021ccecSLiu Shixin ZSWAP_INIT_SUCCEED, 2229021ccecSLiu Shixin ZSWAP_INIT_FAILED 2239021ccecSLiu Shixin }; 22490b0fc26SDan Streetman 2259021ccecSLiu Shixin static enum zswap_init_type zswap_init_state; 226d7b028f5SDan Streetman 227*141fdeecSLiu Shixin /* used to ensure the integrity of initialization */ 228*141fdeecSLiu Shixin static DEFINE_MUTEX(zswap_init_lock); 229*141fdeecSLiu Shixin 230ae3d89a7SDan Streetman /* init completed, but couldn't create the initial pool */ 231ae3d89a7SDan Streetman static bool zswap_has_pool; 232ae3d89a7SDan Streetman 233f1c54846SDan Streetman /********************************* 234f1c54846SDan Streetman * helpers and fwd declarations 235f1c54846SDan Streetman **********************************/ 236f1c54846SDan Streetman 237f1c54846SDan Streetman #define zswap_pool_debug(msg, p) \ 238f1c54846SDan Streetman pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ 239f1c54846SDan Streetman zpool_get_type((p)->zpool)) 240f1c54846SDan Streetman 241f1c54846SDan Streetman static int zswap_writeback_entry(struct zpool *pool, unsigned long handle); 242f1c54846SDan Streetman static int zswap_pool_get(struct zswap_pool *pool); 243f1c54846SDan Streetman static void zswap_pool_put(struct zswap_pool *pool); 244f1c54846SDan Streetman 245f1c54846SDan Streetman static const struct zpool_ops zswap_zpool_ops = { 246f1c54846SDan Streetman .evict = zswap_writeback_entry 247f1c54846SDan Streetman 
}; 248f1c54846SDan Streetman 249f1c54846SDan Streetman static bool zswap_is_full(void) 250f1c54846SDan Streetman { 251ca79b0c2SArun KS return totalram_pages() * zswap_max_pool_percent / 100 < 252f1c54846SDan Streetman DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); 253f1c54846SDan Streetman } 254f1c54846SDan Streetman 25545190f01SVitaly Wool static bool zswap_can_accept(void) 25645190f01SVitaly Wool { 25745190f01SVitaly Wool return totalram_pages() * zswap_accept_thr_percent / 100 * 25845190f01SVitaly Wool zswap_max_pool_percent / 100 > 25945190f01SVitaly Wool DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); 26045190f01SVitaly Wool } 26145190f01SVitaly Wool 262f1c54846SDan Streetman static void zswap_update_total_size(void) 263f1c54846SDan Streetman { 264f1c54846SDan Streetman struct zswap_pool *pool; 265f1c54846SDan Streetman u64 total = 0; 266f1c54846SDan Streetman 267f1c54846SDan Streetman rcu_read_lock(); 268f1c54846SDan Streetman 269f1c54846SDan Streetman list_for_each_entry_rcu(pool, &zswap_pools, list) 270f1c54846SDan Streetman total += zpool_get_total_size(pool->zpool); 271f1c54846SDan Streetman 272f1c54846SDan Streetman rcu_read_unlock(); 273f1c54846SDan Streetman 274f1c54846SDan Streetman zswap_pool_total_size = total; 275f1c54846SDan Streetman } 276f1c54846SDan Streetman 2772b281117SSeth Jennings /********************************* 2782b281117SSeth Jennings * zswap entry functions 2792b281117SSeth Jennings **********************************/ 2802b281117SSeth Jennings static struct kmem_cache *zswap_entry_cache; 2812b281117SSeth Jennings 2822b281117SSeth Jennings static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp) 2832b281117SSeth Jennings { 2842b281117SSeth Jennings struct zswap_entry *entry; 2852b281117SSeth Jennings entry = kmem_cache_alloc(zswap_entry_cache, gfp); 2862b281117SSeth Jennings if (!entry) 2872b281117SSeth Jennings return NULL; 2882b281117SSeth Jennings entry->refcount = 1; 2890ab0abcfSWeijie Yang RB_CLEAR_NODE(&entry->rbnode); 
2902b281117SSeth Jennings return entry; 2912b281117SSeth Jennings } 2922b281117SSeth Jennings 2932b281117SSeth Jennings static void zswap_entry_cache_free(struct zswap_entry *entry) 2942b281117SSeth Jennings { 2952b281117SSeth Jennings kmem_cache_free(zswap_entry_cache, entry); 2962b281117SSeth Jennings } 2972b281117SSeth Jennings 2982b281117SSeth Jennings /********************************* 2992b281117SSeth Jennings * rbtree functions 3002b281117SSeth Jennings **********************************/ 3012b281117SSeth Jennings static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset) 3022b281117SSeth Jennings { 3032b281117SSeth Jennings struct rb_node *node = root->rb_node; 3042b281117SSeth Jennings struct zswap_entry *entry; 3052b281117SSeth Jennings 3062b281117SSeth Jennings while (node) { 3072b281117SSeth Jennings entry = rb_entry(node, struct zswap_entry, rbnode); 3082b281117SSeth Jennings if (entry->offset > offset) 3092b281117SSeth Jennings node = node->rb_left; 3102b281117SSeth Jennings else if (entry->offset < offset) 3112b281117SSeth Jennings node = node->rb_right; 3122b281117SSeth Jennings else 3132b281117SSeth Jennings return entry; 3142b281117SSeth Jennings } 3152b281117SSeth Jennings return NULL; 3162b281117SSeth Jennings } 3172b281117SSeth Jennings 3182b281117SSeth Jennings /* 3192b281117SSeth Jennings * In the case that a entry with the same offset is found, a pointer to 3202b281117SSeth Jennings * the existing entry is stored in dupentry and the function returns -EEXIST 3212b281117SSeth Jennings */ 3222b281117SSeth Jennings static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, 3232b281117SSeth Jennings struct zswap_entry **dupentry) 3242b281117SSeth Jennings { 3252b281117SSeth Jennings struct rb_node **link = &root->rb_node, *parent = NULL; 3262b281117SSeth Jennings struct zswap_entry *myentry; 3272b281117SSeth Jennings 3282b281117SSeth Jennings while (*link) { 3292b281117SSeth Jennings parent = *link; 
3302b281117SSeth Jennings myentry = rb_entry(parent, struct zswap_entry, rbnode); 3312b281117SSeth Jennings if (myentry->offset > entry->offset) 3322b281117SSeth Jennings link = &(*link)->rb_left; 3332b281117SSeth Jennings else if (myentry->offset < entry->offset) 3342b281117SSeth Jennings link = &(*link)->rb_right; 3352b281117SSeth Jennings else { 3362b281117SSeth Jennings *dupentry = myentry; 3372b281117SSeth Jennings return -EEXIST; 3382b281117SSeth Jennings } 3392b281117SSeth Jennings } 3402b281117SSeth Jennings rb_link_node(&entry->rbnode, parent, link); 3412b281117SSeth Jennings rb_insert_color(&entry->rbnode, root); 3422b281117SSeth Jennings return 0; 3432b281117SSeth Jennings } 3442b281117SSeth Jennings 3450ab0abcfSWeijie Yang static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) 3460ab0abcfSWeijie Yang { 3470ab0abcfSWeijie Yang if (!RB_EMPTY_NODE(&entry->rbnode)) { 3480ab0abcfSWeijie Yang rb_erase(&entry->rbnode, root); 3490ab0abcfSWeijie Yang RB_CLEAR_NODE(&entry->rbnode); 3500ab0abcfSWeijie Yang } 3510ab0abcfSWeijie Yang } 3520ab0abcfSWeijie Yang 3530ab0abcfSWeijie Yang /* 35412d79d64SDan Streetman * Carries out the common pattern of freeing and entry's zpool allocation, 3550ab0abcfSWeijie Yang * freeing the entry itself, and decrementing the number of stored pages. 
3560ab0abcfSWeijie Yang */ 35760105e12SMinchan Kim static void zswap_free_entry(struct zswap_entry *entry) 3580ab0abcfSWeijie Yang { 359f4840ccfSJohannes Weiner if (entry->objcg) { 360f4840ccfSJohannes Weiner obj_cgroup_uncharge_zswap(entry->objcg, entry->length); 361f4840ccfSJohannes Weiner obj_cgroup_put(entry->objcg); 362f4840ccfSJohannes Weiner } 363a85f878bSSrividya Desireddy if (!entry->length) 364a85f878bSSrividya Desireddy atomic_dec(&zswap_same_filled_pages); 365a85f878bSSrividya Desireddy else { 366f1c54846SDan Streetman zpool_free(entry->pool->zpool, entry->handle); 367f1c54846SDan Streetman zswap_pool_put(entry->pool); 368a85f878bSSrividya Desireddy } 3690ab0abcfSWeijie Yang zswap_entry_cache_free(entry); 3700ab0abcfSWeijie Yang atomic_dec(&zswap_stored_pages); 371f1c54846SDan Streetman zswap_update_total_size(); 3720ab0abcfSWeijie Yang } 3730ab0abcfSWeijie Yang 3740ab0abcfSWeijie Yang /* caller must hold the tree lock */ 3750ab0abcfSWeijie Yang static void zswap_entry_get(struct zswap_entry *entry) 3760ab0abcfSWeijie Yang { 3770ab0abcfSWeijie Yang entry->refcount++; 3780ab0abcfSWeijie Yang } 3790ab0abcfSWeijie Yang 3800ab0abcfSWeijie Yang /* caller must hold the tree lock 3810ab0abcfSWeijie Yang * remove from the tree and free it, if nobody reference the entry 3820ab0abcfSWeijie Yang */ 3830ab0abcfSWeijie Yang static void zswap_entry_put(struct zswap_tree *tree, 3840ab0abcfSWeijie Yang struct zswap_entry *entry) 3850ab0abcfSWeijie Yang { 3860ab0abcfSWeijie Yang int refcount = --entry->refcount; 3870ab0abcfSWeijie Yang 3880ab0abcfSWeijie Yang BUG_ON(refcount < 0); 3890ab0abcfSWeijie Yang if (refcount == 0) { 3900ab0abcfSWeijie Yang zswap_rb_erase(&tree->rbroot, entry); 39160105e12SMinchan Kim zswap_free_entry(entry); 3920ab0abcfSWeijie Yang } 3930ab0abcfSWeijie Yang } 3940ab0abcfSWeijie Yang 3950ab0abcfSWeijie Yang /* caller must hold the tree lock */ 3960ab0abcfSWeijie Yang static struct zswap_entry *zswap_entry_find_get(struct rb_root *root, 
3970ab0abcfSWeijie Yang pgoff_t offset) 3980ab0abcfSWeijie Yang { 399b0c9865fSAlexey Klimov struct zswap_entry *entry; 4000ab0abcfSWeijie Yang 4010ab0abcfSWeijie Yang entry = zswap_rb_search(root, offset); 4020ab0abcfSWeijie Yang if (entry) 4030ab0abcfSWeijie Yang zswap_entry_get(entry); 4040ab0abcfSWeijie Yang 4050ab0abcfSWeijie Yang return entry; 4060ab0abcfSWeijie Yang } 4070ab0abcfSWeijie Yang 4082b281117SSeth Jennings /********************************* 4092b281117SSeth Jennings * per-cpu code 4102b281117SSeth Jennings **********************************/ 4112b281117SSeth Jennings static DEFINE_PER_CPU(u8 *, zswap_dstmem); 4121ec3b5feSBarry Song /* 4131ec3b5feSBarry Song * If users dynamically change the zpool type and compressor at runtime, i.e. 4141ec3b5feSBarry Song * zswap is running, zswap can have more than one zpool on one cpu, but they 4151ec3b5feSBarry Song * are sharing dtsmem. So we need this mutex to be per-cpu. 4161ec3b5feSBarry Song */ 4171ec3b5feSBarry Song static DEFINE_PER_CPU(struct mutex *, zswap_mutex); 4182b281117SSeth Jennings 419ad7ed770SSebastian Andrzej Siewior static int zswap_dstmem_prepare(unsigned int cpu) 4202b281117SSeth Jennings { 4211ec3b5feSBarry Song struct mutex *mutex; 4222b281117SSeth Jennings u8 *dst; 4232b281117SSeth Jennings 42472d09633SEric Dumazet dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu)); 4252b2695f5SMarkus Elfring if (!dst) 426ad7ed770SSebastian Andrzej Siewior return -ENOMEM; 4272b2695f5SMarkus Elfring 4281ec3b5feSBarry Song mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu)); 4291ec3b5feSBarry Song if (!mutex) { 4301ec3b5feSBarry Song kfree(dst); 4311ec3b5feSBarry Song return -ENOMEM; 4321ec3b5feSBarry Song } 4331ec3b5feSBarry Song 4341ec3b5feSBarry Song mutex_init(mutex); 4352b281117SSeth Jennings per_cpu(zswap_dstmem, cpu) = dst; 4361ec3b5feSBarry Song per_cpu(zswap_mutex, cpu) = mutex; 437ad7ed770SSebastian Andrzej Siewior return 0; 438ad7ed770SSebastian Andrzej Siewior } 
439ad7ed770SSebastian Andrzej Siewior 440ad7ed770SSebastian Andrzej Siewior static int zswap_dstmem_dead(unsigned int cpu) 441ad7ed770SSebastian Andrzej Siewior { 4421ec3b5feSBarry Song struct mutex *mutex; 443ad7ed770SSebastian Andrzej Siewior u8 *dst; 444ad7ed770SSebastian Andrzej Siewior 4451ec3b5feSBarry Song mutex = per_cpu(zswap_mutex, cpu); 4461ec3b5feSBarry Song kfree(mutex); 4471ec3b5feSBarry Song per_cpu(zswap_mutex, cpu) = NULL; 4481ec3b5feSBarry Song 4492b281117SSeth Jennings dst = per_cpu(zswap_dstmem, cpu); 4502b281117SSeth Jennings kfree(dst); 4512b281117SSeth Jennings per_cpu(zswap_dstmem, cpu) = NULL; 4522b281117SSeth Jennings 4532b281117SSeth Jennings return 0; 454f1c54846SDan Streetman } 455f1c54846SDan Streetman 456cab7a7e5SSebastian Andrzej Siewior static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node) 457f1c54846SDan Streetman { 458cab7a7e5SSebastian Andrzej Siewior struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); 4591ec3b5feSBarry Song struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); 4601ec3b5feSBarry Song struct crypto_acomp *acomp; 4611ec3b5feSBarry Song struct acomp_req *req; 462f1c54846SDan Streetman 4631ec3b5feSBarry Song acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu)); 4641ec3b5feSBarry Song if (IS_ERR(acomp)) { 4651ec3b5feSBarry Song pr_err("could not alloc crypto acomp %s : %ld\n", 4661ec3b5feSBarry Song pool->tfm_name, PTR_ERR(acomp)); 4671ec3b5feSBarry Song return PTR_ERR(acomp); 4681ec3b5feSBarry Song } 4691ec3b5feSBarry Song acomp_ctx->acomp = acomp; 470cab7a7e5SSebastian Andrzej Siewior 4711ec3b5feSBarry Song req = acomp_request_alloc(acomp_ctx->acomp); 4721ec3b5feSBarry Song if (!req) { 4731ec3b5feSBarry Song pr_err("could not alloc crypto acomp_request %s\n", 4741ec3b5feSBarry Song pool->tfm_name); 4751ec3b5feSBarry Song crypto_free_acomp(acomp_ctx->acomp); 476cab7a7e5SSebastian Andrzej Siewior return -ENOMEM; 477f1c54846SDan Streetman } 
4781ec3b5feSBarry Song acomp_ctx->req = req; 4791ec3b5feSBarry Song 4801ec3b5feSBarry Song crypto_init_wait(&acomp_ctx->wait); 4811ec3b5feSBarry Song /* 4821ec3b5feSBarry Song * if the backend of acomp is async zip, crypto_req_done() will wakeup 4831ec3b5feSBarry Song * crypto_wait_req(); if the backend of acomp is scomp, the callback 4841ec3b5feSBarry Song * won't be called, crypto_wait_req() will return without blocking. 4851ec3b5feSBarry Song */ 4861ec3b5feSBarry Song acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 4871ec3b5feSBarry Song crypto_req_done, &acomp_ctx->wait); 4881ec3b5feSBarry Song 4891ec3b5feSBarry Song acomp_ctx->mutex = per_cpu(zswap_mutex, cpu); 4901ec3b5feSBarry Song acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu); 4911ec3b5feSBarry Song 492cab7a7e5SSebastian Andrzej Siewior return 0; 493cab7a7e5SSebastian Andrzej Siewior } 494cab7a7e5SSebastian Andrzej Siewior 495cab7a7e5SSebastian Andrzej Siewior static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node) 496cab7a7e5SSebastian Andrzej Siewior { 497cab7a7e5SSebastian Andrzej Siewior struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); 4981ec3b5feSBarry Song struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); 499cab7a7e5SSebastian Andrzej Siewior 5001ec3b5feSBarry Song if (!IS_ERR_OR_NULL(acomp_ctx)) { 5011ec3b5feSBarry Song if (!IS_ERR_OR_NULL(acomp_ctx->req)) 5021ec3b5feSBarry Song acomp_request_free(acomp_ctx->req); 5031ec3b5feSBarry Song if (!IS_ERR_OR_NULL(acomp_ctx->acomp)) 5041ec3b5feSBarry Song crypto_free_acomp(acomp_ctx->acomp); 5051ec3b5feSBarry Song } 5061ec3b5feSBarry Song 507f1c54846SDan Streetman return 0; 508f1c54846SDan Streetman } 509f1c54846SDan Streetman 510f1c54846SDan Streetman /********************************* 511f1c54846SDan Streetman * pool functions 512f1c54846SDan Streetman **********************************/ 513f1c54846SDan Streetman 514f1c54846SDan Streetman static struct zswap_pool 
*__zswap_pool_current(void) 515f1c54846SDan Streetman { 516f1c54846SDan Streetman struct zswap_pool *pool; 517f1c54846SDan Streetman 518f1c54846SDan Streetman pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list); 519ae3d89a7SDan Streetman WARN_ONCE(!pool && zswap_has_pool, 520ae3d89a7SDan Streetman "%s: no page storage pool!\n", __func__); 521f1c54846SDan Streetman 522f1c54846SDan Streetman return pool; 523f1c54846SDan Streetman } 524f1c54846SDan Streetman 525f1c54846SDan Streetman static struct zswap_pool *zswap_pool_current(void) 526f1c54846SDan Streetman { 527f1c54846SDan Streetman assert_spin_locked(&zswap_pools_lock); 528f1c54846SDan Streetman 529f1c54846SDan Streetman return __zswap_pool_current(); 530f1c54846SDan Streetman } 531f1c54846SDan Streetman 532f1c54846SDan Streetman static struct zswap_pool *zswap_pool_current_get(void) 533f1c54846SDan Streetman { 534f1c54846SDan Streetman struct zswap_pool *pool; 535f1c54846SDan Streetman 536f1c54846SDan Streetman rcu_read_lock(); 537f1c54846SDan Streetman 538f1c54846SDan Streetman pool = __zswap_pool_current(); 539ae3d89a7SDan Streetman if (!zswap_pool_get(pool)) 540f1c54846SDan Streetman pool = NULL; 541f1c54846SDan Streetman 542f1c54846SDan Streetman rcu_read_unlock(); 543f1c54846SDan Streetman 544f1c54846SDan Streetman return pool; 545f1c54846SDan Streetman } 546f1c54846SDan Streetman 547f1c54846SDan Streetman static struct zswap_pool *zswap_pool_last_get(void) 548f1c54846SDan Streetman { 549f1c54846SDan Streetman struct zswap_pool *pool, *last = NULL; 550f1c54846SDan Streetman 551f1c54846SDan Streetman rcu_read_lock(); 552f1c54846SDan Streetman 553f1c54846SDan Streetman list_for_each_entry_rcu(pool, &zswap_pools, list) 554f1c54846SDan Streetman last = pool; 555ae3d89a7SDan Streetman WARN_ONCE(!last && zswap_has_pool, 556ae3d89a7SDan Streetman "%s: no page storage pool!\n", __func__); 557ae3d89a7SDan Streetman if (!zswap_pool_get(last)) 558f1c54846SDan Streetman last = NULL; 559f1c54846SDan 
Streetman 560f1c54846SDan Streetman rcu_read_unlock(); 561f1c54846SDan Streetman 562f1c54846SDan Streetman return last; 563f1c54846SDan Streetman } 564f1c54846SDan Streetman 5658bc8b228SDan Streetman /* type and compressor must be null-terminated */ 566f1c54846SDan Streetman static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) 567f1c54846SDan Streetman { 568f1c54846SDan Streetman struct zswap_pool *pool; 569f1c54846SDan Streetman 570f1c54846SDan Streetman assert_spin_locked(&zswap_pools_lock); 571f1c54846SDan Streetman 572f1c54846SDan Streetman list_for_each_entry_rcu(pool, &zswap_pools, list) { 5738bc8b228SDan Streetman if (strcmp(pool->tfm_name, compressor)) 574f1c54846SDan Streetman continue; 5758bc8b228SDan Streetman if (strcmp(zpool_get_type(pool->zpool), type)) 576f1c54846SDan Streetman continue; 577f1c54846SDan Streetman /* if we can't get it, it's about to be destroyed */ 578f1c54846SDan Streetman if (!zswap_pool_get(pool)) 579f1c54846SDan Streetman continue; 580f1c54846SDan Streetman return pool; 581f1c54846SDan Streetman } 582f1c54846SDan Streetman 583f1c54846SDan Streetman return NULL; 584f1c54846SDan Streetman } 585f1c54846SDan Streetman 58645190f01SVitaly Wool static void shrink_worker(struct work_struct *w) 58745190f01SVitaly Wool { 58845190f01SVitaly Wool struct zswap_pool *pool = container_of(w, typeof(*pool), 58945190f01SVitaly Wool shrink_work); 59045190f01SVitaly Wool 59145190f01SVitaly Wool if (zpool_shrink(pool->zpool, 1, NULL)) 59245190f01SVitaly Wool zswap_reject_reclaim_fail++; 59345190f01SVitaly Wool zswap_pool_put(pool); 59445190f01SVitaly Wool } 59545190f01SVitaly Wool 596f1c54846SDan Streetman static struct zswap_pool *zswap_pool_create(char *type, char *compressor) 597f1c54846SDan Streetman { 598f1c54846SDan Streetman struct zswap_pool *pool; 59932a4e169SDan Streetman char name[38]; /* 'zswap' + 32 char (max) num + \0 */ 600d0164adcSMel Gorman gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; 
601cab7a7e5SSebastian Andrzej Siewior int ret; 602f1c54846SDan Streetman 603bae21db8SDan Streetman if (!zswap_has_pool) { 604bae21db8SDan Streetman /* if either are unset, pool initialization failed, and we 605bae21db8SDan Streetman * need both params to be set correctly before trying to 606bae21db8SDan Streetman * create a pool. 607bae21db8SDan Streetman */ 608bae21db8SDan Streetman if (!strcmp(type, ZSWAP_PARAM_UNSET)) 609bae21db8SDan Streetman return NULL; 610bae21db8SDan Streetman if (!strcmp(compressor, ZSWAP_PARAM_UNSET)) 611bae21db8SDan Streetman return NULL; 612bae21db8SDan Streetman } 613bae21db8SDan Streetman 614f1c54846SDan Streetman pool = kzalloc(sizeof(*pool), GFP_KERNEL); 615f4ae0ce0SMarkus Elfring if (!pool) 616f1c54846SDan Streetman return NULL; 617f1c54846SDan Streetman 61832a4e169SDan Streetman /* unique name for each pool specifically required by zsmalloc */ 61932a4e169SDan Streetman snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); 62032a4e169SDan Streetman 62132a4e169SDan Streetman pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); 622f1c54846SDan Streetman if (!pool->zpool) { 623f1c54846SDan Streetman pr_err("%s zpool not available\n", type); 624f1c54846SDan Streetman goto error; 625f1c54846SDan Streetman } 626f1c54846SDan Streetman pr_debug("using %s zpool\n", zpool_get_type(pool->zpool)); 627f1c54846SDan Streetman 62879cd4202SZhiyuan Dai strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); 6291ec3b5feSBarry Song 6301ec3b5feSBarry Song pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx); 6311ec3b5feSBarry Song if (!pool->acomp_ctx) { 632f1c54846SDan Streetman pr_err("percpu alloc failed\n"); 633f1c54846SDan Streetman goto error; 634f1c54846SDan Streetman } 635f1c54846SDan Streetman 636cab7a7e5SSebastian Andrzej Siewior ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE, 637cab7a7e5SSebastian Andrzej Siewior &pool->node); 638cab7a7e5SSebastian Andrzej Siewior if (ret) 639f1c54846SDan Streetman 
		goto error;

	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	INIT_WORK(&pool->shrink_work, shrink_worker);

	zswap_pool_debug("created", pool);

	return pool;

error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

/*
 * Fall back to the compile-time default compressor and/or zpool when the
 * currently configured ones are unavailable.  If even a default is missing,
 * its module param is reset to ZSWAP_PARAM_UNSET (so a later param write can
 * retry) and no pool is created.
 */
static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	/* both a compressor and a zpool are required to build a pool */
	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

/*
 * Tear down a pool: unregister the CPU-hotplug instance, then free the
 * per-CPU acomp contexts, the backing zpool and the pool struct itself.
 */
static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

/*
 * Try to take a reference on @pool.  Returns 0 for a NULL pool or a pool
 * whose refcount has already dropped to zero (i.e. one being torn down).
 */
static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

/*
 * Workqueue callback that actually destroys a pool.  Runs after the pool has
 * been unlinked from zswap_pools; the synchronize_rcu() below makes sure no
 * RCU list walker can still observe it before it is freed.
 */
static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

/*
 * kref release callback: unlink the pool from zswap_pools under the pools
 * lock and defer the rest to a workqueue, since __zswap_pool_release() needs
 * to sleep in synchronize_rcu().
 */
static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	/* the current pool always holds a reference, so it can't hit zero */
	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

/* Drop a reference; the last put triggers __zswap_pool_empty(). */
static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/

/* Return true if writing @s to the param @kp would change the pool config. */
static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/*
 * Common handler for the "compressor" and "zpool" module params.  Exactly one
 * of @type/@compressor is NULL: the NULL one identifies which param is being
 * set (and @val supplies its new value), while the non-NULL one carries the
 * current value of the other param.
 *
 * val must be a null-terminated string
 */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	/* validate the new value before touching any pool state */
	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		/* callers must pass exactly one NULL argument */
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	/* reuse an existing pool with this type/compressor combo, if any */
	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	/* pool creation may sleep, so it happens outside the lock */
	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		/* the new pool becomes current (list head); drop the old one */
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

/* param handler for "compressor": current zpool type rides along as @type */
static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

/* param handler for "zpool": current compressor rides along as @compressor */
static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

/*
 * Handler for the "enabled" param.  Enabling at runtime may trigger late
 * initialization via zswap_setup() if zswap was never set up at boot.
 */
static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* non-zero return means setup failed: keep ret = -ENODEV */
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaption of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache). If the page
 * is found, it is returned in retpage.
 * Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * If success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;

	u8 *src, *tmp = NULL;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/*
	 * If the zpool mapping can't be held across a sleeping operation,
	 * allocate a bounce buffer up front so the compressed data can be
	 * copied out before unmapping.
	 */
	if (!zpool_can_sleep_mapped(pool)) {
		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
	}

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);
	zpool_unmap_handle(pool, handle);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		kfree(tmp);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
		dlen = PAGE_SIZE;

		zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
		src = (u8 *)zhdr + sizeof(struct zswap_header);
		if (!zpool_can_sleep_mapped(pool)) {
			/* copy to the bounce buffer and unmap before sleeping */
			memcpy(tmp, src, entry->length);
			src = tmp;
			zpool_unmap_handle(pool, handle);
		}

		mutex_lock(acomp_ctx->mutex);
		sg_init_one(&input, src, entry->length);
		sg_init_table(&output, 1);
		sg_set_page(&output, page, PAGE_SIZE, 0);
		acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
		ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
		dlen = acomp_ctx->req->dlen;
		mutex_unlock(acomp_ctx->mutex);

		if (!zpool_can_sleep_mapped(pool))
			kfree(tmp);
		else
			zpool_unmap_handle(pool, handle);

		/* a stored entry must always decompress to exactly one page */
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	* There are two possible situations for entry here:
	* (1) refcount is 1(normal case),  entry is valid and on the tree
	* (2) refcount is 0, entry is freed and not on the tree
	*     because invalidate happened during writeback
	*  search the tree and free the entry if find entry
	*/
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return ret;

fail:
	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);

	/*
	* if we get here due to ZSWAP_SWAPCACHE_EXIST
	* a load may be happening concurrently.
	* it is safe and okay to not free the entry.
	* if we free the entry in the following put
	* it is also okay to return !0
	*/
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return ret;
}

/*
 * Return 1 and store the repeated word in *value if the page at @ptr
 * consists of a single machine word repeated across its whole length;
 * return 0 otherwise.  Checks the last word first as a cheap early out.
 */
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}

/* Reconstruct a same-filled page by replicating @value across it. */
static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

/*********************************
* frontswap hooks
**********************************/ 11322b281117SSeth Jennings /* attempts to compress and store an single page */ 11332b281117SSeth Jennings static int zswap_frontswap_store(unsigned type, pgoff_t offset, 11342b281117SSeth Jennings struct page *page) 11352b281117SSeth Jennings { 11362b281117SSeth Jennings struct zswap_tree *tree = zswap_trees[type]; 11372b281117SSeth Jennings struct zswap_entry *entry, *dupentry; 11381ec3b5feSBarry Song struct scatterlist input, output; 11391ec3b5feSBarry Song struct crypto_acomp_ctx *acomp_ctx; 1140f4840ccfSJohannes Weiner struct obj_cgroup *objcg = NULL; 1141f4840ccfSJohannes Weiner struct zswap_pool *pool; 11422b281117SSeth Jennings int ret; 11439c3760ebSYu Zhao unsigned int hlen, dlen = PAGE_SIZE; 1144a85f878bSSrividya Desireddy unsigned long handle, value; 11452b281117SSeth Jennings char *buf; 11462b281117SSeth Jennings u8 *src, *dst; 11479c3760ebSYu Zhao struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) }; 1148d2fcd82bSHui Zhu gfp_t gfp; 11492b281117SSeth Jennings 11507ba71669SHuang Ying /* THP isn't supported */ 11517ba71669SHuang Ying if (PageTransHuge(page)) { 11527ba71669SHuang Ying ret = -EINVAL; 11537ba71669SHuang Ying goto reject; 11547ba71669SHuang Ying } 11557ba71669SHuang Ying 1156c00ed16aSDan Streetman if (!zswap_enabled || !tree) { 11572b281117SSeth Jennings ret = -ENODEV; 11582b281117SSeth Jennings goto reject; 11592b281117SSeth Jennings } 11602b281117SSeth Jennings 1161f4840ccfSJohannes Weiner objcg = get_obj_cgroup_from_page(page); 1162f4840ccfSJohannes Weiner if (objcg && !obj_cgroup_may_zswap(objcg)) 1163f4840ccfSJohannes Weiner goto shrink; 1164f4840ccfSJohannes Weiner 11652b281117SSeth Jennings /* reclaim space if needed */ 11662b281117SSeth Jennings if (zswap_is_full()) { 11672b281117SSeth Jennings zswap_pool_limit_hit++; 116845190f01SVitaly Wool zswap_pool_reached_full = true; 1169f4840ccfSJohannes Weiner goto shrink; 11702b281117SSeth Jennings } 117116e536efSLi Wang 117245190f01SVitaly 
Wool if (zswap_pool_reached_full) { 117345190f01SVitaly Wool if (!zswap_can_accept()) { 117416e536efSLi Wang ret = -ENOMEM; 117516e536efSLi Wang goto reject; 117645190f01SVitaly Wool } else 117745190f01SVitaly Wool zswap_pool_reached_full = false; 11782b281117SSeth Jennings } 11792b281117SSeth Jennings 11802b281117SSeth Jennings /* allocate entry */ 11812b281117SSeth Jennings entry = zswap_entry_cache_alloc(GFP_KERNEL); 11822b281117SSeth Jennings if (!entry) { 11832b281117SSeth Jennings zswap_reject_kmemcache_fail++; 11842b281117SSeth Jennings ret = -ENOMEM; 11852b281117SSeth Jennings goto reject; 11862b281117SSeth Jennings } 11872b281117SSeth Jennings 1188a85f878bSSrividya Desireddy if (zswap_same_filled_pages_enabled) { 1189a85f878bSSrividya Desireddy src = kmap_atomic(page); 1190a85f878bSSrividya Desireddy if (zswap_is_page_same_filled(src, &value)) { 1191a85f878bSSrividya Desireddy kunmap_atomic(src); 1192a85f878bSSrividya Desireddy entry->offset = offset; 1193a85f878bSSrividya Desireddy entry->length = 0; 1194a85f878bSSrividya Desireddy entry->value = value; 1195a85f878bSSrividya Desireddy atomic_inc(&zswap_same_filled_pages); 1196a85f878bSSrividya Desireddy goto insert_entry; 1197a85f878bSSrividya Desireddy } 1198a85f878bSSrividya Desireddy kunmap_atomic(src); 1199a85f878bSSrividya Desireddy } 1200a85f878bSSrividya Desireddy 1201cb325dddSMaciej S. Szmigiero if (!zswap_non_same_filled_pages_enabled) { 1202cb325dddSMaciej S. Szmigiero ret = -EINVAL; 1203cb325dddSMaciej S. Szmigiero goto freepage; 1204cb325dddSMaciej S. Szmigiero } 1205cb325dddSMaciej S. 
Szmigiero 1206f1c54846SDan Streetman /* if entry is successfully added, it keeps the reference */ 1207f1c54846SDan Streetman entry->pool = zswap_pool_current_get(); 1208f1c54846SDan Streetman if (!entry->pool) { 12092b281117SSeth Jennings ret = -EINVAL; 12102b281117SSeth Jennings goto freepage; 12112b281117SSeth Jennings } 12122b281117SSeth Jennings 1213f1c54846SDan Streetman /* compress */ 12141ec3b5feSBarry Song acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); 12151ec3b5feSBarry Song 12161ec3b5feSBarry Song mutex_lock(acomp_ctx->mutex); 12171ec3b5feSBarry Song 12181ec3b5feSBarry Song dst = acomp_ctx->dstmem; 12191ec3b5feSBarry Song sg_init_table(&input, 1); 12201ec3b5feSBarry Song sg_set_page(&input, page, PAGE_SIZE, 0); 12211ec3b5feSBarry Song 12221ec3b5feSBarry Song /* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */ 12231ec3b5feSBarry Song sg_init_one(&output, dst, PAGE_SIZE * 2); 12241ec3b5feSBarry Song acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen); 12251ec3b5feSBarry Song /* 12261ec3b5feSBarry Song * it maybe looks a little bit silly that we send an asynchronous request, 12271ec3b5feSBarry Song * then wait for its completion synchronously. This makes the process look 12281ec3b5feSBarry Song * synchronous in fact. 12291ec3b5feSBarry Song * Theoretically, acomp supports users send multiple acomp requests in one 12301ec3b5feSBarry Song * acomp instance, then get those requests done simultaneously. but in this 12311ec3b5feSBarry Song * case, frontswap actually does store and load page by page, there is no 12321ec3b5feSBarry Song * existing method to send the second page before the first page is done 12331ec3b5feSBarry Song * in one thread doing frontswap. 12341ec3b5feSBarry Song * but in different threads running on different cpu, we have different 12351ec3b5feSBarry Song * acomp instance, so multiple threads can do (de)compression in parallel. 
12361ec3b5feSBarry Song */ 12371ec3b5feSBarry Song ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait); 12381ec3b5feSBarry Song dlen = acomp_ctx->req->dlen; 12391ec3b5feSBarry Song 1240f1c54846SDan Streetman if (ret) { 1241f1c54846SDan Streetman ret = -EINVAL; 1242f1c54846SDan Streetman goto put_dstmem; 1243f1c54846SDan Streetman } 1244f1c54846SDan Streetman 12452b281117SSeth Jennings /* store */ 12469c3760ebSYu Zhao hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0; 1247d2fcd82bSHui Zhu gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; 1248d2fcd82bSHui Zhu if (zpool_malloc_support_movable(entry->pool->zpool)) 1249d2fcd82bSHui Zhu gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; 1250d2fcd82bSHui Zhu ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle); 12512b281117SSeth Jennings if (ret == -ENOSPC) { 12522b281117SSeth Jennings zswap_reject_compress_poor++; 1253f1c54846SDan Streetman goto put_dstmem; 12542b281117SSeth Jennings } 12552b281117SSeth Jennings if (ret) { 12562b281117SSeth Jennings zswap_reject_alloc_fail++; 1257f1c54846SDan Streetman goto put_dstmem; 12582b281117SSeth Jennings } 1259ae34af1fSMiaohe Lin buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO); 12609c3760ebSYu Zhao memcpy(buf, &zhdr, hlen); 12619c3760ebSYu Zhao memcpy(buf + hlen, dst, dlen); 1262f1c54846SDan Streetman zpool_unmap_handle(entry->pool->zpool, handle); 12631ec3b5feSBarry Song mutex_unlock(acomp_ctx->mutex); 12642b281117SSeth Jennings 12652b281117SSeth Jennings /* populate entry */ 12662b281117SSeth Jennings entry->offset = offset; 12672b281117SSeth Jennings entry->handle = handle; 12682b281117SSeth Jennings entry->length = dlen; 12692b281117SSeth Jennings 1270a85f878bSSrividya Desireddy insert_entry: 1271f4840ccfSJohannes Weiner entry->objcg = objcg; 1272f4840ccfSJohannes Weiner if (objcg) { 1273f4840ccfSJohannes Weiner obj_cgroup_charge_zswap(objcg, entry->length); 1274f4840ccfSJohannes Weiner /* Account before objcg 
ref is moved to tree */ 1275f4840ccfSJohannes Weiner count_objcg_event(objcg, ZSWPOUT); 1276f4840ccfSJohannes Weiner } 1277f4840ccfSJohannes Weiner 12782b281117SSeth Jennings /* map */ 12792b281117SSeth Jennings spin_lock(&tree->lock); 12802b281117SSeth Jennings do { 12812b281117SSeth Jennings ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry); 12822b281117SSeth Jennings if (ret == -EEXIST) { 12832b281117SSeth Jennings zswap_duplicate_entry++; 12842b281117SSeth Jennings /* remove from rbtree */ 12850ab0abcfSWeijie Yang zswap_rb_erase(&tree->rbroot, dupentry); 12860ab0abcfSWeijie Yang zswap_entry_put(tree, dupentry); 12872b281117SSeth Jennings } 12882b281117SSeth Jennings } while (ret == -EEXIST); 12892b281117SSeth Jennings spin_unlock(&tree->lock); 12902b281117SSeth Jennings 12912b281117SSeth Jennings /* update stats */ 12922b281117SSeth Jennings atomic_inc(&zswap_stored_pages); 1293f1c54846SDan Streetman zswap_update_total_size(); 1294f6498b77SJohannes Weiner count_vm_event(ZSWPOUT); 12952b281117SSeth Jennings 12962b281117SSeth Jennings return 0; 12972b281117SSeth Jennings 1298f1c54846SDan Streetman put_dstmem: 12991ec3b5feSBarry Song mutex_unlock(acomp_ctx->mutex); 1300f1c54846SDan Streetman zswap_pool_put(entry->pool); 1301f1c54846SDan Streetman freepage: 13022b281117SSeth Jennings zswap_entry_cache_free(entry); 13032b281117SSeth Jennings reject: 1304f4840ccfSJohannes Weiner if (objcg) 1305f4840ccfSJohannes Weiner obj_cgroup_put(objcg); 13062b281117SSeth Jennings return ret; 1307f4840ccfSJohannes Weiner 1308f4840ccfSJohannes Weiner shrink: 1309f4840ccfSJohannes Weiner pool = zswap_pool_last_get(); 1310f4840ccfSJohannes Weiner if (pool) 1311f4840ccfSJohannes Weiner queue_work(shrink_wq, &pool->shrink_work); 1312f4840ccfSJohannes Weiner ret = -ENOMEM; 1313f4840ccfSJohannes Weiner goto reject; 13142b281117SSeth Jennings } 13152b281117SSeth Jennings 13162b281117SSeth Jennings /* 13172b281117SSeth Jennings * returns 0 if the page was successfully decompressed 
13182b281117SSeth Jennings * return -1 on entry not found or error 13192b281117SSeth Jennings */ 13202b281117SSeth Jennings static int zswap_frontswap_load(unsigned type, pgoff_t offset, 13212b281117SSeth Jennings struct page *page) 13222b281117SSeth Jennings { 13232b281117SSeth Jennings struct zswap_tree *tree = zswap_trees[type]; 13242b281117SSeth Jennings struct zswap_entry *entry; 13251ec3b5feSBarry Song struct scatterlist input, output; 13261ec3b5feSBarry Song struct crypto_acomp_ctx *acomp_ctx; 1327fc6697a8STian Tao u8 *src, *dst, *tmp; 13282b281117SSeth Jennings unsigned int dlen; 13290ab0abcfSWeijie Yang int ret; 13302b281117SSeth Jennings 13312b281117SSeth Jennings /* find */ 13322b281117SSeth Jennings spin_lock(&tree->lock); 13330ab0abcfSWeijie Yang entry = zswap_entry_find_get(&tree->rbroot, offset); 13342b281117SSeth Jennings if (!entry) { 13352b281117SSeth Jennings /* entry was written back */ 13362b281117SSeth Jennings spin_unlock(&tree->lock); 13372b281117SSeth Jennings return -1; 13382b281117SSeth Jennings } 13392b281117SSeth Jennings spin_unlock(&tree->lock); 13402b281117SSeth Jennings 1341a85f878bSSrividya Desireddy if (!entry->length) { 1342a85f878bSSrividya Desireddy dst = kmap_atomic(page); 1343a85f878bSSrividya Desireddy zswap_fill_page(dst, entry->value); 1344a85f878bSSrividya Desireddy kunmap_atomic(dst); 1345fc6697a8STian Tao ret = 0; 1346f6498b77SJohannes Weiner goto stats; 1347a85f878bSSrividya Desireddy } 1348a85f878bSSrividya Desireddy 1349fc6697a8STian Tao if (!zpool_can_sleep_mapped(entry->pool->zpool)) { 13508d9b6370SSergey Senozhatsky tmp = kmalloc(entry->length, GFP_KERNEL); 1351fc6697a8STian Tao if (!tmp) { 1352fc6697a8STian Tao ret = -ENOMEM; 1353fc6697a8STian Tao goto freeentry; 1354fc6697a8STian Tao } 1355fc6697a8STian Tao } 1356fc6697a8STian Tao 13572b281117SSeth Jennings /* decompress */ 13582b281117SSeth Jennings dlen = PAGE_SIZE; 13599c3760ebSYu Zhao src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO); 
13609c3760ebSYu Zhao if (zpool_evictable(entry->pool->zpool)) 13619c3760ebSYu Zhao src += sizeof(struct zswap_header); 13621ec3b5feSBarry Song 1363fc6697a8STian Tao if (!zpool_can_sleep_mapped(entry->pool->zpool)) { 1364fc6697a8STian Tao memcpy(tmp, src, entry->length); 1365fc6697a8STian Tao src = tmp; 1366fc6697a8STian Tao zpool_unmap_handle(entry->pool->zpool, entry->handle); 1367fc6697a8STian Tao } 1368fc6697a8STian Tao 13691ec3b5feSBarry Song acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); 13701ec3b5feSBarry Song mutex_lock(acomp_ctx->mutex); 13711ec3b5feSBarry Song sg_init_one(&input, src, entry->length); 13721ec3b5feSBarry Song sg_init_table(&output, 1); 13731ec3b5feSBarry Song sg_set_page(&output, page, PAGE_SIZE, 0); 13741ec3b5feSBarry Song acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen); 13751ec3b5feSBarry Song ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait); 13761ec3b5feSBarry Song mutex_unlock(acomp_ctx->mutex); 13771ec3b5feSBarry Song 1378fc6697a8STian Tao if (zpool_can_sleep_mapped(entry->pool->zpool)) 1379f1c54846SDan Streetman zpool_unmap_handle(entry->pool->zpool, entry->handle); 1380fc6697a8STian Tao else 1381fc6697a8STian Tao kfree(tmp); 1382fc6697a8STian Tao 13832b281117SSeth Jennings BUG_ON(ret); 1384f6498b77SJohannes Weiner stats: 1385f6498b77SJohannes Weiner count_vm_event(ZSWPIN); 1386f4840ccfSJohannes Weiner if (entry->objcg) 1387f4840ccfSJohannes Weiner count_objcg_event(entry->objcg, ZSWPIN); 1388a85f878bSSrividya Desireddy freeentry: 13892b281117SSeth Jennings spin_lock(&tree->lock); 13900ab0abcfSWeijie Yang zswap_entry_put(tree, entry); 13912b281117SSeth Jennings spin_unlock(&tree->lock); 13922b281117SSeth Jennings 1393fc6697a8STian Tao return ret; 13942b281117SSeth Jennings } 13952b281117SSeth Jennings 13962b281117SSeth Jennings /* frees an entry in zswap */ 13972b281117SSeth Jennings static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset) 
13982b281117SSeth Jennings { 13992b281117SSeth Jennings struct zswap_tree *tree = zswap_trees[type]; 14002b281117SSeth Jennings struct zswap_entry *entry; 14012b281117SSeth Jennings 14022b281117SSeth Jennings /* find */ 14032b281117SSeth Jennings spin_lock(&tree->lock); 14042b281117SSeth Jennings entry = zswap_rb_search(&tree->rbroot, offset); 14052b281117SSeth Jennings if (!entry) { 14062b281117SSeth Jennings /* entry was written back */ 14072b281117SSeth Jennings spin_unlock(&tree->lock); 14082b281117SSeth Jennings return; 14092b281117SSeth Jennings } 14102b281117SSeth Jennings 14112b281117SSeth Jennings /* remove from rbtree */ 14120ab0abcfSWeijie Yang zswap_rb_erase(&tree->rbroot, entry); 14132b281117SSeth Jennings 14142b281117SSeth Jennings /* drop the initial reference from entry creation */ 14150ab0abcfSWeijie Yang zswap_entry_put(tree, entry); 14162b281117SSeth Jennings 14172b281117SSeth Jennings spin_unlock(&tree->lock); 14182b281117SSeth Jennings } 14192b281117SSeth Jennings 14202b281117SSeth Jennings /* frees all zswap entries for the given swap type */ 14212b281117SSeth Jennings static void zswap_frontswap_invalidate_area(unsigned type) 14222b281117SSeth Jennings { 14232b281117SSeth Jennings struct zswap_tree *tree = zswap_trees[type]; 14240bd42136SCody P Schafer struct zswap_entry *entry, *n; 14252b281117SSeth Jennings 14262b281117SSeth Jennings if (!tree) 14272b281117SSeth Jennings return; 14282b281117SSeth Jennings 14292b281117SSeth Jennings /* walk the tree and free everything */ 14302b281117SSeth Jennings spin_lock(&tree->lock); 14310ab0abcfSWeijie Yang rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) 143260105e12SMinchan Kim zswap_free_entry(entry); 14332b281117SSeth Jennings tree->rbroot = RB_ROOT; 14342b281117SSeth Jennings spin_unlock(&tree->lock); 1435aa9bca05SWeijie Yang kfree(tree); 1436aa9bca05SWeijie Yang zswap_trees[type] = NULL; 14372b281117SSeth Jennings } 14382b281117SSeth Jennings 14392b281117SSeth Jennings static 
void zswap_frontswap_init(unsigned type) 14402b281117SSeth Jennings { 14412b281117SSeth Jennings struct zswap_tree *tree; 14422b281117SSeth Jennings 14439cd1f701SMarkus Elfring tree = kzalloc(sizeof(*tree), GFP_KERNEL); 144460105e12SMinchan Kim if (!tree) { 144560105e12SMinchan Kim pr_err("alloc failed, zswap disabled for swap type %d\n", type); 144660105e12SMinchan Kim return; 144760105e12SMinchan Kim } 144860105e12SMinchan Kim 14492b281117SSeth Jennings tree->rbroot = RB_ROOT; 14502b281117SSeth Jennings spin_lock_init(&tree->lock); 14512b281117SSeth Jennings zswap_trees[type] = tree; 14522b281117SSeth Jennings } 14532b281117SSeth Jennings 14541da0d94aSChristoph Hellwig static const struct frontswap_ops zswap_frontswap_ops = { 14552b281117SSeth Jennings .store = zswap_frontswap_store, 14562b281117SSeth Jennings .load = zswap_frontswap_load, 14572b281117SSeth Jennings .invalidate_page = zswap_frontswap_invalidate_page, 14582b281117SSeth Jennings .invalidate_area = zswap_frontswap_invalidate_area, 14592b281117SSeth Jennings .init = zswap_frontswap_init 14602b281117SSeth Jennings }; 14612b281117SSeth Jennings 14622b281117SSeth Jennings /********************************* 14632b281117SSeth Jennings * debugfs functions 14642b281117SSeth Jennings **********************************/ 14652b281117SSeth Jennings #ifdef CONFIG_DEBUG_FS 14662b281117SSeth Jennings #include <linux/debugfs.h> 14672b281117SSeth Jennings 14682b281117SSeth Jennings static struct dentry *zswap_debugfs_root; 14692b281117SSeth Jennings 1470*141fdeecSLiu Shixin static int zswap_debugfs_init(void) 14712b281117SSeth Jennings { 14722b281117SSeth Jennings if (!debugfs_initialized()) 14732b281117SSeth Jennings return -ENODEV; 14742b281117SSeth Jennings 14752b281117SSeth Jennings zswap_debugfs_root = debugfs_create_dir("zswap", NULL); 14762b281117SSeth Jennings 14770825a6f9SJoe Perches debugfs_create_u64("pool_limit_hit", 0444, 14782b281117SSeth Jennings zswap_debugfs_root, &zswap_pool_limit_hit); 
14790825a6f9SJoe Perches debugfs_create_u64("reject_reclaim_fail", 0444, 14802b281117SSeth Jennings zswap_debugfs_root, &zswap_reject_reclaim_fail); 14810825a6f9SJoe Perches debugfs_create_u64("reject_alloc_fail", 0444, 14822b281117SSeth Jennings zswap_debugfs_root, &zswap_reject_alloc_fail); 14830825a6f9SJoe Perches debugfs_create_u64("reject_kmemcache_fail", 0444, 14842b281117SSeth Jennings zswap_debugfs_root, &zswap_reject_kmemcache_fail); 14850825a6f9SJoe Perches debugfs_create_u64("reject_compress_poor", 0444, 14862b281117SSeth Jennings zswap_debugfs_root, &zswap_reject_compress_poor); 14870825a6f9SJoe Perches debugfs_create_u64("written_back_pages", 0444, 14882b281117SSeth Jennings zswap_debugfs_root, &zswap_written_back_pages); 14890825a6f9SJoe Perches debugfs_create_u64("duplicate_entry", 0444, 14902b281117SSeth Jennings zswap_debugfs_root, &zswap_duplicate_entry); 14910825a6f9SJoe Perches debugfs_create_u64("pool_total_size", 0444, 149212d79d64SDan Streetman zswap_debugfs_root, &zswap_pool_total_size); 14930825a6f9SJoe Perches debugfs_create_atomic_t("stored_pages", 0444, 14942b281117SSeth Jennings zswap_debugfs_root, &zswap_stored_pages); 1495a85f878bSSrividya Desireddy debugfs_create_atomic_t("same_filled_pages", 0444, 1496a85f878bSSrividya Desireddy zswap_debugfs_root, &zswap_same_filled_pages); 14972b281117SSeth Jennings 14982b281117SSeth Jennings return 0; 14992b281117SSeth Jennings } 15002b281117SSeth Jennings #else 1501*141fdeecSLiu Shixin static int zswap_debugfs_init(void) 15022b281117SSeth Jennings { 15032b281117SSeth Jennings return 0; 15042b281117SSeth Jennings } 15052b281117SSeth Jennings #endif 15062b281117SSeth Jennings 15072b281117SSeth Jennings /********************************* 15082b281117SSeth Jennings * module init and exit 15092b281117SSeth Jennings **********************************/ 1510*141fdeecSLiu Shixin static int zswap_setup(void) 15112b281117SSeth Jennings { 1512f1c54846SDan Streetman struct zswap_pool *pool; 
1513ad7ed770SSebastian Andrzej Siewior int ret; 151460105e12SMinchan Kim 1515b7919122SLiu Shixin zswap_entry_cache = KMEM_CACHE(zswap_entry, 0); 1516b7919122SLiu Shixin if (!zswap_entry_cache) { 15172b281117SSeth Jennings pr_err("entry cache creation failed\n"); 1518f1c54846SDan Streetman goto cache_fail; 15192b281117SSeth Jennings } 1520f1c54846SDan Streetman 1521ad7ed770SSebastian Andrzej Siewior ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare", 1522ad7ed770SSebastian Andrzej Siewior zswap_dstmem_prepare, zswap_dstmem_dead); 1523ad7ed770SSebastian Andrzej Siewior if (ret) { 1524f1c54846SDan Streetman pr_err("dstmem alloc failed\n"); 1525f1c54846SDan Streetman goto dstmem_fail; 15262b281117SSeth Jennings } 1527f1c54846SDan Streetman 1528cab7a7e5SSebastian Andrzej Siewior ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE, 1529cab7a7e5SSebastian Andrzej Siewior "mm/zswap_pool:prepare", 1530cab7a7e5SSebastian Andrzej Siewior zswap_cpu_comp_prepare, 1531cab7a7e5SSebastian Andrzej Siewior zswap_cpu_comp_dead); 1532cab7a7e5SSebastian Andrzej Siewior if (ret) 1533cab7a7e5SSebastian Andrzej Siewior goto hp_fail; 1534cab7a7e5SSebastian Andrzej Siewior 1535f1c54846SDan Streetman pool = __zswap_pool_create_fallback(); 1536ae3d89a7SDan Streetman if (pool) { 1537f1c54846SDan Streetman pr_info("loaded using pool %s/%s\n", pool->tfm_name, 1538f1c54846SDan Streetman zpool_get_type(pool->zpool)); 1539f1c54846SDan Streetman list_add(&pool->list, &zswap_pools); 1540ae3d89a7SDan Streetman zswap_has_pool = true; 1541ae3d89a7SDan Streetman } else { 1542ae3d89a7SDan Streetman pr_err("pool creation failed\n"); 1543ae3d89a7SDan Streetman zswap_enabled = false; 1544ae3d89a7SDan Streetman } 154560105e12SMinchan Kim 154645190f01SVitaly Wool shrink_wq = create_workqueue("zswap-shrink"); 154745190f01SVitaly Wool if (!shrink_wq) 154845190f01SVitaly Wool goto fallback_fail; 154945190f01SVitaly Wool 15501da0d94aSChristoph Hellwig ret = 
frontswap_register_ops(&zswap_frontswap_ops); 15511da0d94aSChristoph Hellwig if (ret) 15521da0d94aSChristoph Hellwig goto destroy_wq; 15532b281117SSeth Jennings if (zswap_debugfs_init()) 15542b281117SSeth Jennings pr_warn("debugfs initialization failed\n"); 15559021ccecSLiu Shixin zswap_init_state = ZSWAP_INIT_SUCCEED; 15562b281117SSeth Jennings return 0; 1557f1c54846SDan Streetman 15581da0d94aSChristoph Hellwig destroy_wq: 15591da0d94aSChristoph Hellwig destroy_workqueue(shrink_wq); 156045190f01SVitaly Wool fallback_fail: 156138aeb071SDan Carpenter if (pool) 156245190f01SVitaly Wool zswap_pool_destroy(pool); 1563cab7a7e5SSebastian Andrzej Siewior hp_fail: 1564ad7ed770SSebastian Andrzej Siewior cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE); 1565f1c54846SDan Streetman dstmem_fail: 1566b7919122SLiu Shixin kmem_cache_destroy(zswap_entry_cache); 1567f1c54846SDan Streetman cache_fail: 1568d7b028f5SDan Streetman /* if built-in, we aren't unloaded on failure; don't allow use */ 15699021ccecSLiu Shixin zswap_init_state = ZSWAP_INIT_FAILED; 1570d7b028f5SDan Streetman zswap_enabled = false; 15712b281117SSeth Jennings return -ENOMEM; 15722b281117SSeth Jennings } 1573*141fdeecSLiu Shixin 1574*141fdeecSLiu Shixin static int __init zswap_init(void) 1575*141fdeecSLiu Shixin { 1576*141fdeecSLiu Shixin if (!zswap_enabled) 1577*141fdeecSLiu Shixin return 0; 1578*141fdeecSLiu Shixin return zswap_setup(); 1579*141fdeecSLiu Shixin } 15802b281117SSeth Jennings /* must be late so crypto has time to come up */ 1581*141fdeecSLiu Shixin late_initcall(zswap_init); 15822b281117SSeth Jennings 15832b281117SSeth Jennings MODULE_LICENSE("GPL"); 158468386da8SSeth Jennings MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>"); 15852b281117SSeth Jennings MODULE_DESCRIPTION("Compressed cache for swap pages"); 1586