// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

#define SCOMP_SCRATCH_SIZE 65400

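/*
 * Per-CPU scratch memory, used when a request's source or destination
 * cannot be mapped directly.  @src is a single page that receives a
 * linearised copy of the source scatterlist; @dst is a vmalloc'ed
 * buffer of SCOMP_SCRATCH_SIZE bytes that holds the algorithm's output
 * before it is copied back to the destination scatterlist.  @lock
 * serialises use of the buffers.
 */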
struct scomp_scratch {
	spinlock_t	lock;
	union {
		void	*src;
		unsigned long saddr;
	};
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
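
/*
 * scomp_lock serialises the lazy allocation of alg->stream contexts
 * and protects scomp_scratch_users, the number of live tfms holding a
 * reference on the per-CPU scratch buffers.
 */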
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		struct page *page;
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
		if (!page)
			goto error;
		scratch->src = page_address(page);
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

static void scomp_free_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream = alg->stream;
	int i;

	alg->stream = NULL;
	if (!stream)
		return;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		if (IS_ERR_OR_NULL(ps->ctx))
			break;

		alg->free_ctx(ps->ctx);
	}

	free_percpu(stream);
}

static int scomp_alloc_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream;
	int i;

	stream = alloc_percpu(struct crypto_acomp_stream);
	if (!stream)
		return -ENOMEM;

	alg->stream = stream;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		ps->ctx = alg->alloc_ctx();
		if (IS_ERR(ps->ctx)) {
			scomp_free_streams(alg);
			return PTR_ERR(ps->ctx);
		}

		spin_lock_init(&ps->lock);
	}
	return 0;
}
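
/*
 * The alloc_ctx()/free_ctx() hooks used above are provided by the
 * algorithm driver.  A minimal sketch, assuming a hypothetical "foo"
 * algorithm (the names and context layout are illustrative only):
 *
 *	static void *foo_alloc_ctx(void)
 *	{
 *		void *ctx = kzalloc(sizeof(struct foo_ctx), GFP_KERNEL);
 *
 *		return ctx ?: ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void foo_free_ctx(void *ctx)
 *	{
 *		kfree(ctx);
 *	}
 *
 * Note that alloc_ctx() must return an ERR_PTR() rather than NULL on
 * failure: scomp_alloc_streams() checks IS_ERR(), and
 * scomp_free_streams() treats an ERR_PTR() or NULL ctx as the end of
 * the initialised region.
 */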

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!alg->stream) {
		ret = scomp_alloc_streams(alg);
		if (ret)
			goto unlock;
	}
	if (!scomp_scratch_users) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			goto unlock;
	}
	/*
	 * Count every successfully initialised tfm, not just the first:
	 * crypto_exit_scomp_ops_async() decrements unconditionally and
	 * frees the scratches once the count drops to zero.
	 */
	scomp_scratch_users++;
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

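/*
 * Run a synchronous (de)compression for an acomp request.  Virtual
 * addresses are used as given.  Scatterlist/folio data is kmapped
 * directly when it is contiguous (and, for highmem, within a single
 * page); otherwise the per-CPU scratch buffers are used and the data
 * is copied through them.
 */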
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int n;
	const u8 *src;
	size_t soff;
	size_t doff;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

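	/*
	 * Map the source: take virtual addresses as-is, kmap the pages
	 * directly when the data is contiguous, and fall back to
	 * copying through the scratch page otherwise.
	 */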
	if (acomp_request_src_isvirt(req))
		src = req->svirt;
	else {
		src = scratch->src;
		do {
			if (acomp_request_src_isfolio(req)) {
				spage = folio_page(req->sfolio, 0);
				soff = req->soff;
			} else if (slen <= req->src->length) {
				spage = sg_page(req->src);
				soff = req->src->offset;
			} else
				break;

			spage = nth_page(spage, soff / PAGE_SIZE);
			soff = offset_in_page(soff);

			/*
			 * Page index of the last input byte, relative
			 * to spage; computed from slen - 1 so that a
			 * page-aligned length does not underflow the
			 * unsigned arithmetic.
			 */
			n = (slen - 1) / PAGE_SIZE;
			n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
			if (PageHighMem(nth_page(spage, n)) &&
			    size_add(soff, slen) > PAGE_SIZE)
				break;
			src = kmap_local_page(spage) + soff;
		} while (0);
	}

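	/*
	 * Map the destination the same way; when the scratch buffer
	 * has to be used, the output is capped to SCOMP_SCRATCH_SIZE.
	 */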
	if (acomp_request_dst_isvirt(req))
		dst = req->dvirt;
	else {
		unsigned int max = SCOMP_SCRATCH_SIZE;

		dst = scratch->dst;
		do {
			if (acomp_request_dst_isfolio(req)) {
				dpage = folio_page(req->dfolio, 0);
				doff = req->doff;
			} else if (dlen <= req->dst->length) {
				dpage = sg_page(req->dst);
				doff = req->dst->offset;
			} else
				break;

			dpage = nth_page(dpage, doff / PAGE_SIZE);
			doff = offset_in_page(doff);

			/* As above: index of the last output page. */
			n = (dlen - 1) / PAGE_SIZE;
			n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
			if (PageHighMem(nth_page(dpage, n)) &&
			    size_add(doff, dlen) > PAGE_SIZE)
				break;
			dst = kmap_local_page(dpage) + doff;
			max = dlen;
		} while (0);
		dlen = min(dlen, max);
	}

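	/* Serialise use of this CPU's scratch buffers and stream context. */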
	spin_lock_bh(&scratch->lock);

	if (src == scratch->src)
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

	stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
	spin_lock(&stream->lock);
	if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	if (dst == scratch->dst)
		memcpy_to_sglist(req->dst, 0, dst, dlen);

	spin_unlock(&stream->lock);
	spin_unlock_bh(&scratch->lock);

	req->dlen = dlen;

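	/*
	 * The output went through a kernel mapping: unmap it and flush
	 * the dcache of every page the result touched.
	 */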
	if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
		kunmap_local(dst);
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage = nth_page(dpage, 1);
		}
	}
	if (!acomp_request_src_isvirt(req) && src != scratch->src)
		kunmap_local(src);

	return ret;
}

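/*
 * Process a request plus any requests chained to it via base.list.
 * Each request's individual status is recorded in its base.err; the
 * return value is the status of the head request alone.
 */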
static int scomp_acomp_chain(struct acomp_req *req, int dir)
{
	struct acomp_req *r2;
	int err;

	err = scomp_acomp_comp_decomp(req, dir);
	req->base.err = err;

	list_for_each_entry(r2, &req->base.list, base.list)
		r2->base.err = scomp_acomp_comp_decomp(r2, dir);

	return err;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 0);
}
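
/*
 * An scomp algorithm is only reachable through the acomp interface.
 * A hedged user-side sketch (error handling omitted; "deflate" is
 * assumed to be a registered scomp-backed algorithm):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *	int err;
 *
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_acomp_compress(req);
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 *
 * crypto_acomp_compress() lands in scomp_acomp_compress() above when
 * the underlying algorithm is a synchronous one.
 */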

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

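/*
 * Glue an scomp algorithm into the acomp interface: allocate the
 * underlying synchronous tfm, point the acomp entry points at the
 * wrappers above, and register crypto_exit_scomp_ops_async() so the
 * scratch reference taken in crypto_scomp_init_tfm() is dropped when
 * the tfm goes away.
 */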
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	scomp_free_streams(__crypto_scomp_alg(alg));
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
}

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
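
/*
 * Typical registration from an algorithm driver, sketched with a
 * hypothetical "foo" implementation (the callbacks are illustrative,
 * not part of this file):
 *
 *	static struct scomp_alg foo_alg = {
 *		.alloc_ctx	= foo_alloc_ctx,
 *		.free_ctx	= foo_free_ctx,
 *		.compress	= foo_compress,
 *		.decompress	= foo_decompress,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	ret = crypto_register_scomp(&foo_alg);
 */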

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);
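
/*
 * Drivers that provide several related algorithms usually keep them in
 * an array and register them in one call, e.g. (sketch):
 *
 *	static struct scomp_alg foo_algs[] = { ... };
 *
 *	return crypto_register_scomps(foo_algs, ARRAY_SIZE(foo_algs));
 */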

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");