// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

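/*
 * acomp_walk flags: ACOMP_WALK_SLEEP allows the walk to sleep between
 * steps, while ACOMP_WALK_SRC_LINEAR and ACOMP_WALK_DST_LINEAR mark the
 * source/destination as linear (virtual address) buffers rather than
 * scatterlists.
 */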
enum {
	ACOMP_WALK_SLEEP = 1 << 0,
	ACOMP_WALK_SRC_LINEAR = 1 << 1,
	ACOMP_WALK_DST_LINEAR = 1 << 2,
};

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

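/*
 * Release a transform: run the algorithm's ->exit() hook, then free the
 * synchronous fallback that was allocated for async instances.
 */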
static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (alg->exit)
		alg->exit(acomp);

	if (acomp_is_async(acomp))
		crypto_free_acomp(acomp->fb);
}

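/*
 * Initialise a transform.  Legacy scomp algorithms are redirected to the
 * scomp wrapper; async algorithms additionally allocate a synchronous
 * fallback instance of the same algorithm for requests that cannot be
 * handled asynchronously.
 */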
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);
	struct crypto_acomp *fb = NULL;
	int err;

	acomp->fb = acomp;

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	if (acomp_is_async(acomp)) {
		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		err = -EINVAL;
		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
			goto out_free_fb;

		acomp->fb = fb;
	}

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->base.cra_reqsize;

	acomp->base.exit = crypto_acomp_exit_tfm;

	if (!alg->init)
		return 0;

	err = alg->init(acomp);
	if (err)
		goto out_free_fb;

	return 0;

out_free_fb:
	crypto_free_acomp(fb);
	return err;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
	.algsize = offsetof(struct acomp_alg, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);

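/*
 * acomp_save_req()/acomp_restore_req() stash and reinstate the caller's
 * completion callback and data while the request is temporarily rerouted
 * through acomp_reqchain_done().
 */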
static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct acomp_req_chain *state = &req->chain;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
}

static void acomp_restore_req(struct acomp_req *req)
{
	struct acomp_req_chain *state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
}

static void acomp_reqchain_virt(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
		acomp_request_set_src_dma(req, state->src, slen);
	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
		acomp_request_set_dst_dma(req, state->dst, dlen);
}

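/*
 * Convert virtual-address source/destination buffers into single-entry
 * scatterlists kept in the request's chain state so that scatterlist-only
 * drivers can process the request.
 */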
static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
					  CRYPTO_ACOMP_REQ_DST_VIRT);

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}

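/*
 * Handle a request whose virtual-address buffers cannot be mapped for DMA
 * by running it synchronously on a fallback request allocated on the stack.
 */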
static int acomp_do_nondma(struct acomp_req *req, bool comp)
{
	ACOMP_FBREQ_ON_STACK(fbreq, req);
	int err;

	if (comp)
		err = crypto_acomp_compress(fbreq);
	else
		err = crypto_acomp_decompress(fbreq);

	req->dlen = fbreq->dlen;
	return err;
}

static int acomp_do_one_req(struct acomp_req *req, bool comp)
{
	if (acomp_request_isnondma(req))
		return acomp_do_nondma(req, comp);

	acomp_virt_to_sg(req);
	return comp ? crypto_acomp_reqtfm(req)->compress(req) :
		      crypto_acomp_reqtfm(req)->decompress(req);
}

static int acomp_reqchain_finish(struct acomp_req *req, int err)
{
	acomp_reqchain_virt(req);
	acomp_restore_req(req);
	return err;
}

static void acomp_reqchain_done(void *data, int err)
{
	/* acomp_save_req() pointed base.data at &req->chain. */
	struct acomp_req_chain *state = data;
	struct acomp_req *req = container_of(state, struct acomp_req, chain);
	crypto_completion_t compl;

	compl = state->compl;
	data = state->data;

	if (err == -EINPROGRESS)
		goto notify;

	err = acomp_reqchain_finish(req, err);

notify:
	compl(data, err);
}

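/*
 * Run one request with the completion temporarily redirected to
 * acomp_reqchain_done() so that virtual-address buffers can be restored
 * once the operation finishes.
 */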
static int acomp_do_req_chain(struct acomp_req *req, bool comp)
{
	int err;

	acomp_save_req(req, acomp_reqchain_done);

	err = acomp_do_one_req(req, comp);
	if (err == -EBUSY || err == -EINPROGRESS)
		return err;

	return acomp_reqchain_finish(req, err);
}

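/*
 * Compression entry point.  Requests using scatterlists, or algorithms that
 * handle such requests natively (crypto_acomp_req_chain()), are passed
 * straight to the driver; otherwise the request goes through
 * acomp_do_req_chain() so virtual addresses can be converted first.
 */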
int crypto_acomp_compress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	if (crypto_acomp_req_chain(tfm) || acomp_request_issg(req))
		return crypto_acomp_reqtfm(req)->compress(req);
	return acomp_do_req_chain(req, true);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

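/* Decompression entry point; mirrors crypto_acomp_compress(). */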
int crypto_acomp_decompress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	if (crypto_acomp_req_chain(tfm) || acomp_request_issg(req))
		return crypto_acomp_reqtfm(req)->decompress(req);
	return acomp_do_req_chain(req, false);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

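/*
 * Deferred allocation of per-CPU stream contexts: allocate a context for
 * every CPU flagged in stream_want that does not already have one.
 */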
static void acomp_stream_workfn(struct work_struct *work)
{
	struct crypto_acomp_streams *s =
		container_of(work, struct crypto_acomp_streams, stream_work);
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu;

	for_each_cpu(cpu, &s->stream_want) {
		struct crypto_acomp_stream *ps;
		void *ctx;

		ps = per_cpu_ptr(streams, cpu);
		if (ps->ctx)
			continue;

		ctx = s->alloc_ctx();
		if (IS_ERR(ctx))
			break;

		spin_lock_bh(&ps->lock);
		ps->ctx = ctx;
		spin_unlock_bh(&ps->lock);

		cpumask_clear_cpu(cpu, &s->stream_want);
	}
}

void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	void (*free_ctx)(void *);
	int i;

	s->streams = NULL;
	if (!streams)
		return;

	cancel_work_sync(&s->stream_work);
	free_ctx = s->free_ctx;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);

		if (!ps->ctx)
			continue;

		free_ctx(ps->ctx);
	}

	free_percpu(streams);
}
EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);

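/*
 * Set up the per-CPU stream array.  Only the first possible CPU gets a
 * context up front; the remaining CPUs are populated on demand by
 * acomp_stream_workfn().
 */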
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams;
	struct crypto_acomp_stream *ps;
	unsigned int i;
	void *ctx;

	if (s->streams)
		return 0;

	streams = alloc_percpu(struct crypto_acomp_stream);
	if (!streams)
		return -ENOMEM;

	ctx = s->alloc_ctx();
	if (IS_ERR(ctx)) {
		free_percpu(streams);
		return PTR_ERR(ctx);
	}

	i = cpumask_first(cpu_possible_mask);
	ps = per_cpu_ptr(streams, i);
	ps->ctx = ctx;

	for_each_possible_cpu(i) {
		ps = per_cpu_ptr(streams, i);
		spin_lock_init(&ps->lock);
	}

	s->streams = streams;

	INIT_WORK(&s->stream_work, acomp_stream_workfn);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);

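/*
 * Grab this CPU's stream with bottom halves disabled.  If the local context
 * has not been allocated yet, request it via the workqueue and fall back to
 * the first possible CPU's stream, which always exists.
 */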
struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
	struct crypto_acomp_streams *s) __acquires(stream)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu = raw_smp_processor_id();
	struct crypto_acomp_stream *ps;

	ps = per_cpu_ptr(streams, cpu);
	spin_lock_bh(&ps->lock);
	if (likely(ps->ctx))
		return ps;
	spin_unlock(&ps->lock);

	cpumask_set_cpu(cpu, &s->stream_want);
	schedule_work(&s->stream_work);

	ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
	spin_lock(&ps->lock);
	return ps;
}
EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);

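/*
 * acomp_walk_done_src()/acomp_walk_done_dst() account for 'used' bytes and
 * advance the corresponding walk, optionally rescheduling between steps.
 */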
void acomp_walk_done_src(struct acomp_walk *walk, int used)
{
	walk->slen -= used;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
		scatterwalk_advance(&walk->in, used);
	else
		scatterwalk_done_src(&walk->in, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_src);

void acomp_walk_done_dst(struct acomp_walk *walk, int used)
{
	walk->dlen -= used;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR))
		scatterwalk_advance(&walk->out, used);
	else
		scatterwalk_done_dst(&walk->out, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_dst);

int acomp_walk_next_src(struct acomp_walk *walk)
{
	unsigned int slen = walk->slen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
					   walk->in.offset);
		return min(slen, max);
	}

	return slen ? scatterwalk_next(&walk->in, slen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_src);

int acomp_walk_next_dst(struct acomp_walk *walk)
{
	unsigned int dlen = walk->dlen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
					    walk->out.offset);
		return min(dlen, max);
	}

	return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_dst);

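/*
 * Start a walk over the request's buffers.  Linear (virtual) buffers are
 * walked directly; scatterlist buffers go through the scatterwalk helpers.
 */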
int acomp_walk_virt(struct acomp_walk *__restrict walk,
		    struct acomp_req *__restrict req)
{
	struct scatterlist *src = req->src;
	struct scatterlist *dst = req->dst;

	walk->slen = req->slen;
	walk->dlen = req->dlen;

	if (!walk->slen || !walk->dlen)
		return -EINVAL;

	walk->flags = 0;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
		walk->flags |= ACOMP_WALK_SLEEP;
	if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
		walk->flags |= ACOMP_WALK_SRC_LINEAR;
	if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
		walk->flags |= ACOMP_WALK_DST_LINEAR;

	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.sg = (void *)req->svirt;
		walk->in.offset = 0;
	} else
		scatterwalk_start(&walk->in, src);
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.sg = (void *)req->dvirt;
		walk->out.offset = 0;
	} else
		scatterwalk_start(&walk->out, dst);

	return 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_virt);

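/*
 * Duplicate a request into newly allocated memory.  If the allocation fails,
 * the original request is retargeted at the synchronous fallback transform
 * and reused in place.
 */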
struct acomp_req *acomp_request_clone(struct acomp_req *req,
				      size_t total, gfp_t gfp)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req *nreq;

	nreq = kmalloc(total, gfp);
	if (!nreq) {
		acomp_request_set_tfm(req, tfm->fb);
		req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
		return req;
	}

	memcpy(nreq, req, total);
	/* The copy lives on the heap, so it is no longer an on-stack request. */
	nreq->base.flags &= ~CRYPTO_TFM_REQ_ON_STACK;
	return nreq;
}
EXPORT_SYMBOL_GPL(acomp_request_clone);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");