xref: /linux/mm/zswap.c (revision 64fc2a947a9873700929ec0ef02b4654a04e0476)
/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how often a
 * certain event occurs.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/

/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
module_param_named(enabled, zswap_enabled, bool, 0644);

/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =		zswap_compressor_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =		zswap_zpool_param_set,
	.get =		param_get_charp,
	.free =		param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
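
/*
 * All four knobs above are standard module parameters, so they can be set
 * at boot on the kernel command line or changed at runtime through sysfs
 * (all perms are 0644).  A hypothetical session, assuming the lz4
 * compressor and the zsmalloc zpool are available in the running kernel:
 *
 *	# echo 1 > /sys/module/zswap/parameters/enabled
 *	# echo lz4 > /sys/module/zswap/parameters/compressor
 *	# echo zsmalloc > /sys/module/zswap/parameters/zpool
 *	# echo 25 > /sys/module/zswap/parameters/max_pool_percent
 *
 * or, equivalently at boot:  zswap.enabled=1 zswap.compressor=lz4
 */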

/*********************************
* data structures
**********************************/

struct zswap_pool {
	struct zpool *zpool;
	struct crypto_comp * __percpu *tfm;
	struct kref kref;
	struct list_head list;
	struct work_struct work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	unsigned long handle;
};
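
/*
 * Reference lifecycle of a zswap_entry, as implemented below: a successful
 * store creates the entry with refcount 1 (the tree's reference); load and
 * writeback take a temporary reference via zswap_entry_find_get() while
 * operating on the data; invalidate drops the tree's reference.  The entry
 * is only freed once the last reference is put, so a concurrent load or
 * writeback cannot race with invalidation and touch freed memory.
 */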

struct zswap_header {
	swp_entry_t swpentry;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* the list of zswap pools; read-side iteration is RCU-protected */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

/* used by param callback function */
static bool zswap_init_started;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpool))

static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static const struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
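
/*
 * Worked example of the limit check above: on a machine with 4 GiB of RAM
 * (totalram_pages = 1048576 with 4 KiB pages) and the default
 * max_pool_percent of 20, the pool is considered full once the compressed
 * storage occupies more than 1048576 * 20 / 100 = 209715 pages, i.e.
 * roughly 819 MiB.
 */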

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_size(pool->zpool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * Caller must hold the tree lock; removes the entry from the tree and
 * frees it if nobody references it anymore.
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}
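
/*
 * Typical usage of the pair above, as seen in the load and writeback paths
 * below: look up and pin the entry under the tree lock, drop the lock while
 * working on the data, then retake the lock to put the reference:
 *
 *	spin_lock(&tree->lock);
 *	entry = zswap_entry_find_get(&tree->rbroot, offset);
 *	spin_unlock(&tree->lock);
 *	... use entry->handle / entry->length ...
 *	spin_lock(&tree->lock);
 *	zswap_entry_put(tree, entry);
 *	spin_unlock(&tree->lock);
 */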

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

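/*
 * Each CPU gets a two-page destination buffer for compression output.  The
 * extra page is headroom: poorly compressible data can expand, and a
 * compressor may need an output buffer larger than the input before it can
 * report that a page did not compress.  (This is our reading of the sizing;
 * the allocation below simply requests PAGE_SIZE * 2.)
 */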
static int zswap_dstmem_prepare(unsigned int cpu)
{
	u8 *dst;

	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!dst) {
		pr_err("can't allocate compressor buffer\n");
		return -ENOMEM;
	}
	per_cpu(zswap_dstmem, cpu) = dst;
	return 0;
}

static int zswap_dstmem_dead(unsigned int cpu)
{
	u8 *dst;

	dst = per_cpu(zswap_dstmem, cpu);
	kfree(dst);
	per_cpu(zswap_dstmem, cpu) = NULL;

	return 0;
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
		return 0;

	tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
	if (IS_ERR_OR_NULL(tfm)) {
		pr_err("could not alloc crypto comp %s : %ld\n",
		       pool->tfm_name, PTR_ERR(tfm));
		return -ENOMEM;
	}
	*per_cpu_ptr(pool->tfm, cpu) = tfm;
	return 0;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_comp *tfm;

	tfm = *per_cpu_ptr(pool->tfm, cpu);
	if (!IS_ERR_OR_NULL(tfm))
		crypto_free_comp(tfm);
	*per_cpu_ptr(pool->tfm, cpu) = NULL;
	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ON(!pool);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!pool || !zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	if (!WARN_ON(!last) && !zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		pr_err("pool alloc failed\n");
		return NULL;
	}

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
	pool->tfm = alloc_percpu(struct crypto_comp *);
	if (!pool->tfm) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;
	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

error:
	free_percpu(pool->tfm);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
			pr_err("default compressor %s not available\n",
			       zswap_compressor);
			return NULL;
		}
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
	}
	if (!zpool_has_pool(zswap_zpool_type)) {
		if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
			pr_err("default zpool %s not available\n",
			       zswap_zpool_type);
			return NULL;
		}
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
	}

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->tfm);
	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool), work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->work, __zswap_pool_release);
	schedule_work(&pool->work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}
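
/*
 * Pool teardown sequence implied by the functions above: when the last
 * reference is put, __zswap_pool_empty() removes the pool from zswap_pools
 * under the lock, then defers destruction to a workqueue.  The work item
 * calls synchronize_rcu() before zswap_pool_destroy(), so any reader still
 * iterating the pools list under rcu_read_lock() finishes before the pool
 * memory is freed.
 */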

/*********************************
* param callbacks
**********************************/

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret;

	/* no change required */
	if (!strcmp(s, *(char **)kp->arg))
		return 0;

	/* if this is load-time (pre-init) param setting,
	 * don't create a pool; that's done during init.
	 */
	if (!zswap_init_started)
		return param_set_charp(s, kp);

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_comp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		list_del_rcu(&pool->list);
	} else {
		spin_unlock(&zswap_pools_lock);
		pool = zswap_pool_create(type, compressor);
		spin_lock(&zswap_pools_lock);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}
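
/*
 * Net effect of the two setters above when, say, a hypothetical
 * "echo lz4 > /sys/module/zswap/parameters/compressor" arrives at runtime:
 * __zswap_param_set() finds or creates a pool for the new type/compressor
 * combination, makes it the current pool at the head of zswap_pools, and
 * puts the old current pool.  The old pool remains on the list behind the
 * new one until every entry stored in it has been loaded, invalidated, or
 * written back, since each entry holds a reference on its pool.
 */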

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage.
 * Returns ZSWAP_SWAPCACHE_EXIST if the page was already in the swap cache;
 * returns ZSWAP_SWAPCACHE_NEW if a new page was allocated, added to the
 *     swap cache, and locked (the caller must populate it);
 * returns ZSWAP_SWAPCACHE_FAIL on error.
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	bool page_was_allocated;

	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
			NULL, 0, &page_was_allocated);
	if (page_was_allocated)
		return ZSWAP_SWAPCACHE_NEW;
	if (!*retpage)
		return ZSWAP_SWAPCACHE_FAIL;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
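/*
 * In outline (a summary of the function below, not an additional code
 * path):
 *
 *	zpool evict callback (handle)
 *	  -> map the handle, read back the swp_entry_t from the header
 *	  -> find and ref the zswap_entry under the tree lock
 *	  -> allocate or look up the swap cache page
 *	  -> decompress into the page, SetPageUptodate()
 *	  -> __swap_writepage() to the swap device
 *	  -> put references; the entry is freed unless a concurrent
 *	     load/invalidate still holds it
 */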
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		put_page(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		tfm = *get_cpu_ptr(entry->pool->tfm);
		ret = crypto_comp_decompress(tfm, src, entry->length,
					     dst, &dlen);
		put_cpu_ptr(entry->pool->tfm);
		kunmap_atomic(dst);
		zpool_unmap_handle(entry->pool->zpool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	put_page(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for the entry here:
	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
	 * (2) refcount is 0: the entry was freed and removed from the tree
	 *     because an invalidate happened during writeback
	 * Search the tree; if the entry is still there, drop the tree's
	 * reference to free it.
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently; it is safe and okay to not free the
	 * entry.  If the following put does free the entry (because an
	 * invalidate dropped the tree's reference in the meantime), it
	 * is also okay to return nonzero.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

static int zswap_shrink(void)
{
	struct zswap_pool *pool;
	int ret;

	pool = zswap_pool_last_get();
	if (!pool)
		return -ENOENT;

	ret = zpool_shrink(pool->zpool, 1, NULL);

	zswap_pool_put(pool);

	return ret;
}
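
/*
 * zswap_shrink() asks the zpool to evict a single stored object from the
 * oldest pool; the zpool implementation calls back into
 * zswap_writeback_entry() through zswap_zpool_ops.evict to actually push
 * the page out to the swap device.
 */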

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	struct crypto_comp *tfm;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!zswap_enabled || !tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zswap_shrink()) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool) {
		ret = -EINVAL;
		goto freepage;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	src = kmap_atomic(page);
	ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	put_cpu_ptr(entry->pool->tfm);
	if (ret) {
		ret = -EINVAL;
		goto put_dstmem;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(entry->pool->zpool, len,
			   __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto put_dstmem;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto put_dstmem;
	}
	zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(entry->pool->zpool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_update_total_size();

	return 0;

put_dstmem:
	put_cpu_var(zswap_dstmem);
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
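
/*
 * Layout of the object allocated by the store path above, within the zpool
 * allocation at entry->handle:
 *
 *	+--------------------------+-------------------------------+
 *	| struct zswap_header      | compressed page data          |
 *	| (the original swp_entry) | (entry->length == dlen bytes) |
 *	+--------------------------+-------------------------------+
 *
 * The header lets the writeback path recover which swap slot the data
 * belongs to from nothing but the zpool handle.
 */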

/*
 * Returns 0 if the page was successfully decompressed;
 * returns -1 if the entry was not found or on error.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	struct crypto_comp *tfm;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);
	kunmap_atomic(dst);
	zpool_unmap_handle(entry->pool->zpool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif
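
/*
 * With CONFIG_DEBUG_FS enabled, the counters above appear as read-only
 * files under <debugfs>/zswap/, typically /sys/kernel/debug/zswap/
 * (assuming debugfs is mounted there).  For example:
 *
 *	# grep -r . /sys/kernel/debug/zswap/
 *	/sys/kernel/debug/zswap/pool_total_size:1048576
 *	/sys/kernel/debug/zswap/stored_pages:409
 *	...
 *
 * (the values shown are illustrative only)
 */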

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_init_started = true;

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
				zswap_dstmem_prepare, zswap_dstmem_dead);
	if (ret) {
		pr_err("dstmem alloc failed\n");
		goto dstmem_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	pool = __zswap_pool_create_fallback();
	if (!pool) {
		pr_err("pool creation failed\n");
		goto pool_fail;
	}
	pr_info("loaded using pool %s/%s\n", pool->tfm_name,
		zpool_get_type(pool->zpool));

	list_add(&pool->list, &zswap_pools);

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;

pool_fail:
	cpuhp_remove_state_nocalls(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
	zswap_entry_cache_destroy();
cache_fail:
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");