xref: /linux/crypto/acompress.c (revision 47b5b6f9eb736b1868b0f9c1a1575b5922451cc6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Asynchronous Compression operations
4  *
5  * Copyright (c) 2016, Intel Corporation
6  * Authors: Weigang Li <weigang.li@intel.com>
7  *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
8  */
9 
10 #include <crypto/internal/acompress.h>
11 #include <crypto/scatterwalk.h>
12 #include <linux/cryptouser.h>
13 #include <linux/cpumask.h>
14 #include <linux/errno.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/page-flags.h>
18 #include <linux/percpu.h>
19 #include <linux/scatterlist.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/smp.h>
24 #include <linux/spinlock.h>
25 #include <linux/string.h>
26 #include <linux/workqueue.h>
27 #include <net/netlink.h>
28 
29 #include "compress.h"
30 
31 struct crypto_scomp;
32 
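/*
 * Flags used by struct acomp_walk:
 *  ACOMP_WALK_SLEEP       - the walk may sleep (cond_resched()) between steps.
 *  ACOMP_WALK_SRC_LINEAR  - the source is a linear virtual buffer rather than
 *                           a scatterlist.
 *  ACOMP_WALK_DST_LINEAR  - likewise for the destination.
 */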
33 enum {
34 	ACOMP_WALK_SLEEP = 1 << 0,
35 	ACOMP_WALK_SRC_LINEAR = 1 << 1,
36 	ACOMP_WALK_DST_LINEAR = 1 << 2,
37 };
38 
39 static const struct crypto_type crypto_acomp_type;
40 
41 static void acomp_reqchain_done(void *data, int err);
42 
43 static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
44 {
45 	return container_of(alg, struct acomp_alg, calg.base);
46 }
47 
48 static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
49 {
50 	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
51 }
52 
53 static int __maybe_unused crypto_acomp_report(
54 	struct sk_buff *skb, struct crypto_alg *alg)
55 {
56 	struct crypto_report_acomp racomp;
57 
58 	memset(&racomp, 0, sizeof(racomp));
59 
60 	strscpy(racomp.type, "acomp", sizeof(racomp.type));
61 
62 	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
63 }
64 
65 static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
66 	__maybe_unused;
67 
68 static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
69 {
70 	seq_puts(m, "type         : acomp\n");
71 }
72 
73 static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
74 {
75 	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
76 	struct acomp_alg *alg = crypto_acomp_alg(acomp);
77 
78 	if (alg->exit)
79 		alg->exit(acomp);
80 
81 	if (acomp_is_async(acomp))
82 		crypto_free_acomp(acomp->fb);
83 }
84 
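/*
 * Transform setup: algorithms that are really scomp (synchronous) ones are
 * handed to crypto_init_scomp_ops_async(); native acomp algorithms have their
 * compress/decompress callbacks and request size copied into the tfm.  An
 * asynchronous implementation additionally allocates a synchronous fallback
 * tfm (acomp->fb), used for on-stack and virtual-address requests.
 */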
85 static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
86 {
87 	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
88 	struct acomp_alg *alg = crypto_acomp_alg(acomp);
89 	struct crypto_acomp *fb = NULL;
90 	int err;
91 
92 	acomp->fb = acomp;
93 
94 	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
95 		return crypto_init_scomp_ops_async(tfm);
96 
97 	if (acomp_is_async(acomp)) {
98 		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
99 					CRYPTO_ALG_ASYNC);
100 		if (IS_ERR(fb))
101 			return PTR_ERR(fb);
102 
103 		err = -EINVAL;
104 		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
105 			goto out_free_fb;
106 
107 		acomp->fb = fb;
108 	}
109 
110 	acomp->compress = alg->compress;
111 	acomp->decompress = alg->decompress;
112 	acomp->reqsize = alg->base.cra_reqsize ?: alg->reqsize;
113 
114 	acomp->base.exit = crypto_acomp_exit_tfm;
115 
116 	if (!alg->init)
117 		return 0;
118 
119 	err = alg->init(acomp);
120 	if (err)
121 		goto out_free_fb;
122 
123 	return 0;
124 
125 out_free_fb:
126 	crypto_free_acomp(fb);
127 	return err;
128 }
129 
130 static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
131 {
132 	int extsize = crypto_alg_extsize(alg);
133 
134 	if (alg->cra_type != &crypto_acomp_type)
135 		extsize += sizeof(struct crypto_scomp *);
136 
137 	return extsize;
138 }
139 
140 static const struct crypto_type crypto_acomp_type = {
141 	.extsize = crypto_acomp_extsize,
142 	.init_tfm = crypto_acomp_init_tfm,
143 #ifdef CONFIG_PROC_FS
144 	.show = crypto_acomp_show,
145 #endif
146 #if IS_ENABLED(CONFIG_CRYPTO_USER)
147 	.report = crypto_acomp_report,
148 #endif
149 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
150 	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
151 	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
152 	.tfmsize = offsetof(struct crypto_acomp, base),
153 };
154 
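/*
 * Allocate an acomp transform by algorithm name.  A minimal caller-side
 * sketch (illustrative only: "deflate" is just an example algorithm name,
 * src_sg/dst_sg/slen/dlen are assumed to exist and error handling is
 * omitted):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */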
155 struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
156 					u32 mask)
157 {
158 	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
159 }
160 EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
161 
162 struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
163 					u32 mask, int node)
164 {
165 	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
166 				node);
167 }
168 EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
169 
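/*
 * Request-chaining helpers: the caller's completion callback and data are
 * stashed in req->chain so the request can be completed through
 * acomp_reqchain_done() and then restored once processing has finished.
 */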
170 static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
171 {
172 	struct acomp_req_chain *state = &req->chain;
173 
174 	state->compl = req->base.complete;
175 	state->data = req->base.data;
176 	req->base.complete = cplt;
177 	req->base.data = state;
178 }
179 
180 static void acomp_restore_req(struct acomp_req *req)
181 {
182 	struct acomp_req_chain *state = req->base.data;
183 
184 	req->base.complete = state->compl;
185 	req->base.data = state->data;
186 }
187 
188 static void acomp_reqchain_virt(struct acomp_req *req)
189 {
190 	struct acomp_req_chain *state = &req->chain;
191 	unsigned int slen = req->slen;
192 	unsigned int dlen = req->dlen;
193 
194 	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
195 		acomp_request_set_src_dma(req, state->src, slen);
196 	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
197 		acomp_request_set_dst_dma(req, state->dst, dlen);
198 }
199 
200 static void acomp_virt_to_sg(struct acomp_req *req)
201 {
202 	struct acomp_req_chain *state = &req->chain;
203 
204 	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
205 					  CRYPTO_ACOMP_REQ_DST_VIRT);
206 
207 	if (acomp_request_src_isvirt(req)) {
208 		unsigned int slen = req->slen;
209 		const u8 *svirt = req->svirt;
210 
211 		state->src = svirt;
212 		sg_init_one(&state->ssg, svirt, slen);
213 		acomp_request_set_src_sg(req, &state->ssg, slen);
214 	}
215 
216 	if (acomp_request_dst_isvirt(req)) {
217 		unsigned int dlen = req->dlen;
218 		u8 *dvirt = req->dvirt;
219 
220 		state->dst = dvirt;
221 		sg_init_one(&state->dsg, dvirt, dlen);
222 		acomp_request_set_dst_sg(req, &state->dsg, dlen);
223 	}
224 }
225 
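/*
 * Requests carrying non-DMA (virtual address) buffers are serviced
 * synchronously via an on-stack request on the fallback tfm; only the
 * produced length is copied back into the original request.
 */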
226 static int acomp_do_nondma(struct acomp_req *req, bool comp)
227 {
228 	ACOMP_FBREQ_ON_STACK(fbreq, req);
229 	int err;
230 
231 	if (comp)
232 		err = crypto_acomp_compress(fbreq);
233 	else
234 		err = crypto_acomp_decompress(fbreq);
235 
236 	req->dlen = fbreq->dlen;
237 	return err;
238 }
239 
240 static int acomp_do_one_req(struct acomp_req *req, bool comp)
241 {
242 	if (acomp_request_isnondma(req))
243 		return acomp_do_nondma(req, comp);
244 
245 	acomp_virt_to_sg(req);
246 	return comp ? crypto_acomp_reqtfm(req)->compress(req) :
247 		      crypto_acomp_reqtfm(req)->decompress(req);
248 }
249 
250 static int acomp_reqchain_finish(struct acomp_req *req, int err)
251 {
252 	acomp_reqchain_virt(req);
253 	acomp_restore_req(req);
254 	return err;
255 }
256 
257 static void acomp_reqchain_done(void *data, int err)
258 {
259 	struct acomp_req *req = data;
260 	crypto_completion_t compl;
261 
262 	compl = req->chain.compl;
263 	data = req->chain.data;
264 
265 	if (err == -EINPROGRESS)
266 		goto notify;
267 
268 	err = acomp_reqchain_finish(req, err);
269 
270 notify:
271 	compl(data, err);
272 }
273 
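/*
 * Run a single request through the driver: save the caller's completion,
 * convert any virtual buffers to scatterlists, and invoke the driver.  A
 * synchronous result is unwound immediately; -EBUSY and -EINPROGRESS are
 * returned as-is and completion happens later in acomp_reqchain_done().
 */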
274 static int acomp_do_req_chain(struct acomp_req *req, bool comp)
275 {
276 	int err;
277 
278 	acomp_save_req(req, acomp_reqchain_done);
279 
280 	err = acomp_do_one_req(req, comp);
281 	if (err == -EBUSY || err == -EINPROGRESS)
282 		return err;
283 
284 	return acomp_reqchain_finish(req, err);
285 }
286 
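/*
 * Public compress/decompress entry points.  On-stack requests cannot be used
 * with asynchronous transforms (-EAGAIN).  Drivers that support request
 * chaining, and plain scatterlist requests, are invoked directly; everything
 * else takes the fallback path above.
 */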
287 int crypto_acomp_compress(struct acomp_req *req)
288 {
289 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
290 
291 	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
292 		return -EAGAIN;
293 	if (crypto_acomp_req_chain(tfm) || acomp_request_issg(req))
294 		return crypto_acomp_reqtfm(req)->compress(req);
295 	return acomp_do_req_chain(req, true);
296 }
297 EXPORT_SYMBOL_GPL(crypto_acomp_compress);
298 
299 int crypto_acomp_decompress(struct acomp_req *req)
300 {
301 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
302 
303 	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
304 		return -EAGAIN;
305 	if (crypto_acomp_req_chain(tfm) || acomp_request_issg(req))
306 		return crypto_acomp_reqtfm(req)->decompress(req);
307 	return acomp_do_req_chain(req, false);
308 }
309 EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
310 
311 void comp_prepare_alg(struct comp_alg_common *alg)
312 {
313 	struct crypto_alg *base = &alg->base;
314 
315 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
316 }
317 
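/*
 * Register an acomp algorithm: normalize the common flags, set the acomp
 * type and hand the algorithm to crypto_register_alg().
 *
 * A hypothetical driver would fill in struct acomp_alg roughly as follows
 * (illustrative names and values only):
 *
 *	static struct acomp_alg my_acomp = {
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name		= "deflate",
 *			.cra_driver_name	= "deflate-mydriver",
 *			.cra_priority		= 300,
 *			.cra_module		= THIS_MODULE,
 *		},
 *	};
 *
 * and call crypto_register_acomp(&my_acomp) from its init path.
 */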
318 int crypto_register_acomp(struct acomp_alg *alg)
319 {
320 	struct crypto_alg *base = &alg->calg.base;
321 
322 	comp_prepare_alg(&alg->calg);
323 
324 	base->cra_type = &crypto_acomp_type;
325 	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
326 
327 	return crypto_register_alg(base);
328 }
329 EXPORT_SYMBOL_GPL(crypto_register_acomp);
330 
331 void crypto_unregister_acomp(struct acomp_alg *alg)
332 {
333 	crypto_unregister_alg(&alg->base);
334 }
335 EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
336 
337 int crypto_register_acomps(struct acomp_alg *algs, int count)
338 {
339 	int i, ret;
340 
341 	for (i = 0; i < count; i++) {
342 		ret = crypto_register_acomp(&algs[i]);
343 		if (ret)
344 			goto err;
345 	}
346 
347 	return 0;
348 
349 err:
350 	for (--i; i >= 0; --i)
351 		crypto_unregister_acomp(&algs[i]);
352 
353 	return ret;
354 }
355 EXPORT_SYMBOL_GPL(crypto_register_acomps);
356 
357 void crypto_unregister_acomps(struct acomp_alg *algs, int count)
358 {
359 	int i;
360 
361 	for (i = count - 1; i >= 0; --i)
362 		crypto_unregister_acomp(&algs[i]);
363 }
364 EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
365 
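/*
 * Per-CPU stream management: stream contexts are allocated lazily from a
 * work item.  CPUs that found no context in crypto_acomp_lock_stream_bh()
 * are recorded in stream_want and receive one the next time the worker runs.
 */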
366 static void acomp_stream_workfn(struct work_struct *work)
367 {
368 	struct crypto_acomp_streams *s =
369 		container_of(work, struct crypto_acomp_streams, stream_work);
370 	struct crypto_acomp_stream __percpu *streams = s->streams;
371 	int cpu;
372 
373 	for_each_cpu(cpu, &s->stream_want) {
374 		struct crypto_acomp_stream *ps;
375 		void *ctx;
376 
377 		ps = per_cpu_ptr(streams, cpu);
378 		if (ps->ctx)
379 			continue;
380 
381 		ctx = s->alloc_ctx();
382 		if (IS_ERR(ctx))
383 			break;
384 
385 		spin_lock_bh(&ps->lock);
386 		ps->ctx = ctx;
387 		spin_unlock_bh(&ps->lock);
388 
389 		cpumask_clear_cpu(cpu, &s->stream_want);
390 	}
391 }
392 
393 void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
394 {
395 	struct crypto_acomp_stream __percpu *streams = s->streams;
396 	void (*free_ctx)(void *);
397 	int i;
398 
399 	s->streams = NULL;
400 	if (!streams)
401 		return;
402 
403 	cancel_work_sync(&s->stream_work);
404 	free_ctx = s->free_ctx;
405 
406 	for_each_possible_cpu(i) {
407 		struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
408 
409 		if (!ps->ctx)
410 			continue;
411 
412 		free_ctx(ps->ctx);
413 	}
414 
415 	free_percpu(streams);
416 }
417 EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
418 
419 int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
420 {
421 	struct crypto_acomp_stream __percpu *streams;
422 	struct crypto_acomp_stream *ps;
423 	unsigned int i;
424 	void *ctx;
425 
426 	if (s->streams)
427 		return 0;
428 
429 	streams = alloc_percpu(struct crypto_acomp_stream);
430 	if (!streams)
431 		return -ENOMEM;
432 
433 	ctx = s->alloc_ctx();
434 	if (IS_ERR(ctx)) {
435 		free_percpu(streams);
436 		return PTR_ERR(ctx);
437 	}
438 
439 	i = cpumask_first(cpu_possible_mask);
440 	ps = per_cpu_ptr(streams, i);
441 	ps->ctx = ctx;
442 
443 	for_each_possible_cpu(i) {
444 		ps = per_cpu_ptr(streams, i);
445 		spin_lock_init(&ps->lock);
446 	}
447 
448 	s->streams = streams;
449 
450 	INIT_WORK(&s->stream_work, acomp_stream_workfn);
451 	return 0;
452 }
453 EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
454 
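/*
 * Lock the local CPU's stream with bottom halves disabled.  If this CPU has
 * no context yet, ask the worker for one and fall back to the first possible
 * CPU, whose context was pre-allocated in crypto_acomp_alloc_streams().  The
 * plain spin_unlock() is intentional: BHs stay disabled until the caller
 * releases the stream it was given.
 */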
455 struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
456 	struct crypto_acomp_streams *s) __acquires(stream)
457 {
458 	struct crypto_acomp_stream __percpu *streams = s->streams;
459 	int cpu = raw_smp_processor_id();
460 	struct crypto_acomp_stream *ps;
461 
462 	ps = per_cpu_ptr(streams, cpu);
463 	spin_lock_bh(&ps->lock);
464 	if (likely(ps->ctx))
465 		return ps;
466 	spin_unlock(&ps->lock);
467 
468 	cpumask_set_cpu(cpu, &s->stream_want);
469 	schedule_work(&s->stream_work);
470 
471 	ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
472 	spin_lock(&ps->lock);
473 	return ps;
474 }
475 EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
476 
477 void acomp_walk_done_src(struct acomp_walk *walk, int used)
478 {
479 	walk->slen -= used;
480 	if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
481 		scatterwalk_advance(&walk->in, used);
482 	else
483 		scatterwalk_done_src(&walk->in, used);
484 
485 	if ((walk->flags & ACOMP_WALK_SLEEP))
486 		cond_resched();
487 }
488 EXPORT_SYMBOL_GPL(acomp_walk_done_src);
489 
490 void acomp_walk_done_dst(struct acomp_walk *walk, int used)
491 {
492 	walk->dlen -= used;
493 	if ((walk->flags & ACOMP_WALK_DST_LINEAR))
494 		scatterwalk_advance(&walk->out, used);
495 	else
496 		scatterwalk_done_dst(&walk->out, used);
497 
498 	if ((walk->flags & ACOMP_WALK_SLEEP))
499 		cond_resched();
500 }
501 EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
502 
503 int acomp_walk_next_src(struct acomp_walk *walk)
504 {
505 	unsigned int slen = walk->slen;
506 	unsigned int max = UINT_MAX;
507 
508 	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
509 		max = PAGE_SIZE;
510 	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
511 		walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
512 					   walk->in.offset);
513 		return min(slen, max);
514 	}
515 
516 	return slen ? scatterwalk_next(&walk->in, slen) : 0;
517 }
518 EXPORT_SYMBOL_GPL(acomp_walk_next_src);
519 
520 int acomp_walk_next_dst(struct acomp_walk *walk)
521 {
522 	unsigned int dlen = walk->dlen;
523 	unsigned int max = UINT_MAX;
524 
525 	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
526 		max = PAGE_SIZE;
527 	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
528 		walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
529 					    walk->out.offset);
530 		return min(dlen, max);
531 	}
532 
533 	return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
534 }
535 EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
536 
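/*
 * Set up a walk over the request's source and destination.  Either side may
 * be a scatterlist or a linear virtual buffer; in the linear case the
 * scatterwalk's sg pointer is reused to carry the virtual address.
 *
 * A driver-side consumer loops roughly like this (sketch only; consume()
 * stands in for the actual (de)compression step):
 *
 *	acomp_walk_virt(&walk, req);
 *	while ((n = acomp_walk_next_src(&walk)) != 0) {
 *		consume(walk.src.virt.addr, n);
 *		acomp_walk_done_src(&walk, n);
 *	}
 */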
537 int acomp_walk_virt(struct acomp_walk *__restrict walk,
538 		    struct acomp_req *__restrict req)
539 {
540 	struct scatterlist *src = req->src;
541 	struct scatterlist *dst = req->dst;
542 
543 	walk->slen = req->slen;
544 	walk->dlen = req->dlen;
545 
546 	if (!walk->slen || !walk->dlen)
547 		return -EINVAL;
548 
549 	walk->flags = 0;
550 	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
551 		walk->flags |= ACOMP_WALK_SLEEP;
552 	if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
553 		walk->flags |= ACOMP_WALK_SRC_LINEAR;
554 	if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
555 		walk->flags |= ACOMP_WALK_DST_LINEAR;
556 
557 	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
558 		walk->in.sg = (void *)req->svirt;
559 		walk->in.offset = 0;
560 	} else
561 		scatterwalk_start(&walk->in, src);
562 	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
563 		walk->out.sg = (void *)req->dvirt;
564 		walk->out.offset = 0;
565 	} else
566 		scatterwalk_start(&walk->out, dst);
567 
568 	return 0;
569 }
570 EXPORT_SYMBOL_GPL(acomp_walk_virt);
571 
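/*
 * Copy a request (typically one living on the caller's stack) into a freshly
 * allocated buffer of @total bytes.  If the allocation fails, the original
 * request is switched to the synchronous fallback tfm and marked on-stack so
 * it can still be processed in place.
 */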
572 struct acomp_req *acomp_request_clone(struct acomp_req *req,
573 				      size_t total, gfp_t gfp)
574 {
575 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
576 	struct acomp_req *nreq;
577 
578 	nreq = kmalloc(total, gfp);
579 	if (!nreq) {
580 		acomp_request_set_tfm(req, tfm->fb);
581 		req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
582 		return req;
583 	}
584 
585 	memcpy(nreq, req, total);
586 	acomp_request_set_tfm(nreq, tfm);
587 	return nreq;
588 }
589 EXPORT_SYMBOL_GPL(acomp_request_clone);
590 
591 MODULE_LICENSE("GPL");
592 MODULE_DESCRIPTION("Asynchronous compression type");
593