// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

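/*
 * Per-CPU bounce buffers, SCOMP_SCRATCH_SIZE bytes each for source and
 * destination.  They are allocated when the first scomp tfm is created
 * and freed again when the last one goes away (see scomp_scratch_users),
 * and serve requests whose scatterlists cannot be used in place.
 */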
struct scomp_scratch {
	spinlock_t	lock;
	void		*src;
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
/* Count of scomp tfms sharing the scratch buffers above; the counter
 * and scratch allocation/freeing are serialized by scomp_lock.
 */
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

static void scomp_free_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream = alg->stream;
	int i;

	alg->stream = NULL;
	if (!stream)
		return;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		/* alloc_percpu() zeroes the memory, so the first NULL (or
		 * error-valued) ctx marks the point where allocation stopped.
		 */
		if (IS_ERR_OR_NULL(ps->ctx))
			break;

		alg->free_ctx(ps);
	}

	free_percpu(stream);
}

static int scomp_alloc_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream;
	int i;

	stream = alloc_percpu(struct crypto_acomp_stream);
	if (!stream)
		return -ENOMEM;

	/* Publish the streams before populating them so that the error
	 * path below can hand them to scomp_free_streams(), which also
	 * clears alg->stream again on our behalf.
	 */
	alg->stream = stream;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		ps->ctx = alg->alloc_ctx();
		if (IS_ERR(ps->ctx)) {
			scomp_free_streams(alg);
			return PTR_ERR(ps->ctx);
		}

		spin_lock_init(&ps->lock);
	}
	return 0;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!alg->stream) {
		ret = scomp_alloc_streams(alg);
		if (ret)
			goto unlock;
	}
	/* Count this tfm as a scratch user only once the buffers exist,
	 * so that a failed allocation cannot leave the count unbalanced.
	 */
	if (!scomp_scratch_users) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			goto unlock;
	}
	scomp_scratch_users++;
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

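/*
 * Common implementation behind both acomp entry points.  A single-entry,
 * lowmem scatterlist is used in place; anything else is bounced through
 * the per-CPU scratch buffers, which also caps a request at
 * SCOMP_SCRATCH_SIZE bytes.  @dir is non-zero for compression, zero for
 * decompression.
 */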
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	struct scomp_scratch *scratch;
	void *src, *dst;
	unsigned int dlen;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	/* Remember the destination capacity so overruns can be detected. */
	dlen = req->dlen;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock_bh(&scratch->lock);

	if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
		src = page_to_virt(sg_page(req->src)) + req->src->offset;
	} else {
		scatterwalk_map_and_copy(scratch->src, req->src, 0,
					 req->slen, 0);
		src = scratch->src;
	}

	if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
		dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
	else
		dst = scratch->dst;

	stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
	spin_lock(&stream->lock);
	if (dir)
		ret = crypto_scomp_compress(scomp, src, req->slen,
					    dst, &req->dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, req->slen,
					      dst, &req->dlen, stream->ctx);
	spin_unlock(&stream->lock);
	if (!ret) {
		if (!req->dst) {
			/* A NULL dst asks us to allocate one for the caller. */
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		} else if (req->dlen > dlen) {
			ret = -ENOSPC;
			goto out;
		}
		if (dst == scratch->dst) {
			scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
						 req->dlen, 1);
		} else {
			int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
			int i;
			struct page *dst_page = sg_page(req->dst);

			/* The output was written through the kernel linear
			 * mapping, so flush the destination pages by hand.
			 */
			for (i = 0; i < nr_pages; i++)
				flush_dcache_page(dst_page + i);
		}
	}
out:
	spin_unlock_bh(&scratch->lock);
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

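/*
 * Back an acomp transform with a synchronous scomp algorithm: allocate
 * the underlying scomp tfm and point the asynchronous entry points at
 * the wrappers above.
 */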
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	scomp_free_streams(__crypto_scomp_alg(alg));
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

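/**
 * crypto_register_scomp() - Register a synchronous compression algorithm
 * @alg: algorithm definition
 *
 * Return: 0 on success; a negative errno on failure
 */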
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
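
/*
 * A minimal registration sketch.  The "foo" names and FOO_CTX_SIZE below
 * are hypothetical stand-ins for a real algorithm; the callback
 * signatures mirror the way this file invokes them (alloc_ctx() takes no
 * arguments, free_ctx() receives the per-CPU stream):
 *
 *	static void *foo_alloc_ctx(void)
 *	{
 *		void *ctx = kvmalloc(FOO_CTX_SIZE, GFP_KERNEL);
 *
 *		return ctx ? ctx : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void foo_free_ctx(struct crypto_acomp_stream *ps)
 *	{
 *		kvfree(ps->ctx);
 *	}
 *
 *	static struct scomp_alg foo_alg = {
 *		.alloc_ctx	= foo_alloc_ctx,
 *		.free_ctx	= foo_free_ctx,
 *		.compress	= foo_compress,
 *		.decompress	= foo_decompress,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-scomp",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 * A driver module would then call crypto_register_scomp(&foo_alg) from
 * its init function and crypto_unregister_scomp(&foo_alg) on exit.
 */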
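/**
 * crypto_unregister_scomp() - Unregister a synchronous compression algorithm
 * @alg: algorithm that was registered with crypto_register_scomp()
 */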
void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

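/**
 * crypto_register_scomps() - Register an array of scomp algorithms
 * @algs: array of algorithm definitions
 * @count: number of entries in @algs
 *
 * Registers the entries in order; on failure, the entries that were
 * already registered are unregistered again.
 *
 * Return: 0 on success; a negative errno on failure
 */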
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

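/**
 * crypto_unregister_scomps() - Unregister an array of scomp algorithms
 * @algs: array that was registered with crypto_register_scomps()
 * @count: number of entries in @algs
 */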
void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");