/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

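/*
 * scomp algorithms work on flat buffers while the acomp API hands in
 * scatterlists, so every possible CPU gets a pair of preallocated scratch
 * buffers (SCOMP_SCRATCH_SIZE bytes each) that requests are linearized
 * into.  scomp_scratch_users counts the transforms sharing them; the
 * buffers are allocated and freed under scomp_lock.
 */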
static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

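/*
 * Allocate one SCOMP_SCRATCH_SIZE buffer per possible CPU, placed on the
 * CPU's local node.  On failure the partially populated array is released
 * again through crypto_scomp_free_scratches().
 */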
static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

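/*
 * The scratch buffers are shared by all scomp transforms: the first user
 * allocates them, the last one frees them.  Callers hold scomp_lock.
 */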
static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches) {
			crypto_scomp_free_scratches(scomp_src_scratches);
			scomp_src_scratches = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_scomp_alloc_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

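/*
 * Back an asynchronous acomp request with a synchronous scomp algorithm:
 * copy the source scatterlist into this CPU's linear scratch buffer, run
 * the compression (dir != 0) or decompression (dir == 0), then copy the
 * result into req->dst.  If no destination scatterlist was supplied, one
 * is allocated with sgl_alloc().  Source and destination are both limited
 * to SCOMP_SCRATCH_SIZE bytes.
 */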
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				/* don't report success when no dst was produced */
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

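/*
 * Tear down the acomp-over-scomp wrapper: free the underlying scomp
 * transform and drop this user's reference on the shared scratch buffers.
 */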
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
}

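/*
 * Set up an acomp transform on top of an scomp algorithm: take a reference
 * on the algorithm, create the underlying scomp transform and wire the
 * acomp entry points to the synchronous scratch-buffer helpers above.
 */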
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

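/*
 * Allocate the scomp algorithm's per-request context for an acomp request
 * backed by an scomp transform and stash it in the request's context area.
 * On failure the request itself is freed and NULL is returned, so callers
 * must not touch @req afterwards.
 */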
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

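/*
 * Register a synchronous compression algorithm.  As an illustration only
 * (the names below are made up, not an in-tree driver), a typical user
 * fills in a struct scomp_alg and registers it from module init:
 *
 *	static struct scomp_alg example_scomp = {
 *		.alloc_ctx	= example_alloc_ctx,
 *		.free_ctx	= example_free_ctx,
 *		.compress	= example_compress,
 *		.decompress	= example_decompress,
 *		.base		= {
 *			.cra_name	 = "example",
 *			.cra_driver_name = "example-scomp",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	err = crypto_register_scomp(&example_scomp);
 */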
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

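/*
 * Register an array of scomp algorithms.  If any registration fails, the
 * algorithms registered so far are unregistered again and the error is
 * returned.
 */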
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");