xref: /linux/drivers/block/zram/zcomp.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>

#include "zcomp.h"

static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZO)
	"lzo",
	"lzo-rle",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
	"lz4",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
	"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
	"842",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
	"zstd",
#endif
};

static void zcomp_strm_free(struct zcomp_strm *zstrm)
{
	if (!IS_ERR_OR_NULL(zstrm->tfm))
		crypto_free_comp(zstrm->tfm);
	vfree(zstrm->buffer);
	zstrm->tfm = NULL;
	zstrm->buffer = NULL;
}

/*
 * Initialize zcomp_strm: allocate ->tfm from the crypto backend and the
 * scratch ->buffer. Return a negative value on error.
 */
static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
{
	zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
	/*
	 * Allocate 2 pages: 1 for compressed data, plus 1 extra for the
	 * case when the compressed size is larger than the original one.
	 */
	zstrm->buffer = vzalloc(2 * PAGE_SIZE);
	if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
		zcomp_strm_free(zstrm);
		return -ENOMEM;
	}
	return 0;
}

bool zcomp_available_algorithm(const char *comp)
{
	/*
	 * The crypto API does not ignore a trailing newline, so make
	 * sure the string passed in does not contain one.
	 * This also means that we permit zcomp initialisation with any
	 * compression algorithm known to the crypto API.
	 */
	return crypto_has_comp(comp, 0, 0) == 1;
}
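
/*
 * Illustration (not part of the original source): sysfs writers are
 * expected to strip the trailing newline before calling in, e.g.
 *
 *	zcomp_available_algorithm("lzo")   - true if the crypto API can
 *	                                     instantiate an "lzo" tfm
 *	zcomp_available_algorithm("lzo\n") - false, the name is looked up
 *	                                     verbatim, newline included
 */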

/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	bool known_algorithm = false;
	ssize_t sz = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(backends); i++) {
		if (!strcmp(comp, backends[i])) {
			known_algorithm = true;
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]);
		} else {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]);
		}
	}

	/*
	 * The algorithm is provided by an out-of-tree module known to
	 * the crypto API, or is a missing entry in `backends'.
	 */
	if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
				"[%s] ", comp);

	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}
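
/*
 * Example output (illustrative; assumes all backends above are compiled
 * in and "zstd" is the currently selected compressor):
 *
 *	lzo lzo-rle lz4 lz4hc 842 [zstd]
 */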

struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}

void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}
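
/*
 * A sketch of the intended usage (illustrative only; variable names are
 * hypothetical, zram_drv.c is the real caller):
 *
 *	struct zcomp_strm *zstrm = zcomp_stream_get(comp);
 *
 *	ret = zcomp_compress(zstrm, src, &comp_len);
 *	... consume zstrm->buffer while the stream is still held ...
 *	zcomp_stream_put(comp);
 *
 * The local lock serializes access to the per-CPU stream, so zstrm (and
 * zstrm->buffer) must not be used after zcomp_stream_put().
 */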

int zcomp_compress(struct zcomp_strm *zstrm,
		const void *src, unsigned int *dst_len)
{
	/*
	 * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
	 * because sometimes we can end up with compressed data that is
	 * bigger than the original one, for example when the compression
	 * algorithm adds some padding to the compressed buffer. Speaking
	 * of padding, comp algorithm `842' pads the compressed length to
	 * a multiple of 8 and returns -ENOSPC when the dst memory is not
	 * big enough, which is not something that ZRAM wants to see. We
	 * can handle the `compressed_size > PAGE_SIZE' case easily in
	 * ZRAM, but once we receive an -ERRNO from the compressing
	 * backend we can't help it anymore. To make `842' happy we need
	 * to tell it the exact size of the dst buffer; zram_drv will take
	 * care of the case when the compressed buffer is too big.
	 */
	*dst_len = PAGE_SIZE * 2;

	return crypto_comp_compress(zstrm->tfm,
			src, PAGE_SIZE,
			zstrm->buffer, dst_len);
}
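
/*
 * Note (behaviour of the caller, not of this file): when the reported
 * *dst_len comes back larger than PAGE_SIZE, zram_drv treats the page as
 * incompressible and stores it uncompressed, so an oversized result here
 * is not an error.
 */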

int zcomp_decompress(struct zcomp_strm *zstrm,
		const void *src, unsigned int src_len, void *dst)
{
	unsigned int dst_len = PAGE_SIZE;

	return crypto_comp_decompress(zstrm->tfm,
			src, src_len,
			dst, &dst_len);
}

int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	local_lock_init(&zstrm->lock);

	ret = zcomp_strm_init(zstrm, comp);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	zcomp_strm_free(zstrm);
	return 0;
}
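
/*
 * How the hotplug callbacks are wired up (a sketch; the actual
 * registration lives in zram_drv.c's module init, details may differ):
 *
 *	cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE,
 *				"block/zram/zcomp:prepare",
 *				zcomp_cpu_up_prepare, zcomp_cpu_dead);
 *
 * zcomp_init() below then adds each zcomp instance with
 * cpuhp_state_add_instance(), so a per-CPU stream is (re)allocated
 * whenever a CPU comes online and freed when it goes offline.
 */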

static int zcomp_init(struct zcomp *comp)
{
	int ret;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;
	return 0;

cleanup:
	free_percpu(comp->stream);
	return ret;
}

void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	free_percpu(comp->stream);
	kfree(comp);
}

/*
 * Search the available compressors for the requested algorithm, allocate
 * a new zcomp and initialize it. Return a pointer to the compression
 * backend, or an ERR_PTR() if things went bad: ERR_PTR(-EINVAL) if the
 * requested algorithm is not supported, ERR_PTR(-ENOMEM) in case of an
 * allocation error, or any other error potentially returned by
 * zcomp_init().
 */
struct zcomp *zcomp_create(const char *alg)
{
	struct zcomp *comp;
	int error;

	/*
	 * The crypto API will execute /sbin/modprobe if the compression
	 * module is not loaded yet. We must do the lookup here, otherwise
	 * we would end up calling /sbin/modprobe under the CPU hot-plug
	 * lock.
	 */
	if (!zcomp_available_algorithm(alg))
		return ERR_PTR(-EINVAL);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->name = alg;
	error = zcomp_init(comp);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}
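
/*
 * Usage sketch (illustrative; error handling mirrors what a caller such
 * as zram_drv.c would do):
 *
 *	struct zcomp *comp = zcomp_create("lzo-rle");
 *
 *	if (IS_ERR(comp))
 *		return PTR_ERR(comp);
 *	...
 *	zcomp_destroy(comp);
 */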