// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

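/*
 * Walk flags (summarizing how the acomp_walk helpers below use them):
 * ACOMP_WALK_SLEEP allows the walk to sleep (cond_resched()) between
 * steps, while the *_LINEAR flags mark the source/destination as flat
 * virtual buffers rather than scatterlists.
 */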
enum {
	ACOMP_WALK_SLEEP = 1 << 0,
	ACOMP_WALK_SRC_LINEAR = 1 << 1,
	ACOMP_WALK_DST_LINEAR = 1 << 2,
};

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void __maybe_unused crypto_acomp_show(struct seq_file *m,
					     struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (alg->exit)
		alg->exit(acomp);

	if (acomp_is_async(acomp))
		crypto_free_acomp(crypto_acomp_fb(acomp));
}

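/*
 * Set up a new acomp tfm.  Algorithms registered through the scomp
 * (synchronous) interface are redirected to the scomp glue; for async
 * implementations a synchronous fallback tfm of the same algorithm is
 * allocated up front, and its request size is checked so that fallback
 * requests can live on the stack.
 */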
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);
	struct crypto_acomp *fb = NULL;
	int err;

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	if (acomp_is_async(acomp)) {
		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		err = -EINVAL;
		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
			goto out_free_fb;

		tfm->fb = crypto_acomp_tfm(fb);
	}

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->base.cra_reqsize;

	acomp->base.exit = crypto_acomp_exit_tfm;

	if (!alg->init)
		return 0;

	err = alg->init(acomp);
	if (err)
		goto out_free_fb;

	return 0;

out_free_fb:
	crypto_free_acomp(fb);
	return err;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
	.algsize = offsetof(struct acomp_alg, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
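
/*
 * Illustrative usage sketch (hypothetical caller; error handling is
 * abbreviated, and "deflate", src_sg/dst_sg, slen and dlen are
 * assumptions made up for the example):
 *
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = acomp_request_alloc(tfm);
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */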

static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct acomp_req_chain *state = &req->chain;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
}

static void acomp_restore_req(struct acomp_req *req)
{
	struct acomp_req_chain *state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
}

static void acomp_reqchain_virt(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
		acomp_request_set_src_dma(req, state->src, slen);
	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
		acomp_request_set_dst_dma(req, state->dst, dlen);
}

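/*
 * Wrap flat virtual src/dst buffers in single-entry scatterlists so a
 * scatterlist-based implementation can process the request; the original
 * pointers are stashed in the chain state and put back afterwards by
 * acomp_reqchain_virt() above.
 */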
static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
					  CRYPTO_ACOMP_REQ_DST_VIRT);

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}

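/*
 * Buffers flagged as non-DMA-capable (e.g. on the stack) cannot be
 * converted to scatterlists, so such requests are run synchronously
 * through the fallback tfm via an on-stack fallback request.
 */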
static int acomp_do_nondma(struct acomp_req *req, bool comp)
{
	ACOMP_FBREQ_ON_STACK(fbreq, req);
	int err;

	if (comp)
		err = crypto_acomp_compress(fbreq);
	else
		err = crypto_acomp_decompress(fbreq);

	req->dlen = fbreq->dlen;
	return err;
}

static int acomp_do_one_req(struct acomp_req *req, bool comp)
{
	if (acomp_request_isnondma(req))
		return acomp_do_nondma(req, comp);

	acomp_virt_to_sg(req);
	return comp ? crypto_acomp_reqtfm(req)->compress(req) :
		      crypto_acomp_reqtfm(req)->decompress(req);
}

static int acomp_reqchain_finish(struct acomp_req *req, int err)
{
	acomp_reqchain_virt(req);
	acomp_restore_req(req);
	return err;
}

static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req *req = data;
	crypto_completion_t compl;

	compl = req->chain.compl;
	data = req->chain.data;

	if (err == -EINPROGRESS)
		goto notify;

	err = acomp_reqchain_finish(req, err);

notify:
	compl(data, err);
}

static int acomp_do_req_chain(struct acomp_req *req, bool comp)
{
	int err;

	acomp_save_req(req, acomp_reqchain_done);

	err = acomp_do_one_req(req, comp);
	if (err == -EBUSY || err == -EINPROGRESS)
		return err;

	return acomp_reqchain_finish(req, err);
}

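/*
 * Entry points.  On-stack requests must not reach an async
 * implementation; tfms that handle virtual addresses natively, or
 * requests already expressed as scatterlists, go straight to the
 * algorithm, and everything else takes the acomp_do_req_chain() path
 * above.
 */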
int crypto_acomp_compress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
		return crypto_acomp_reqtfm(req)->compress(req);
	return acomp_do_req_chain(req, true);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
		return crypto_acomp_reqtfm(req)->decompress(req);
	return acomp_do_req_chain(req, false);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret) {
			crypto_unregister_acomps(algs, i);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

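/*
 * Per-CPU stream contexts are populated lazily: a CPU that finds its
 * context missing in _crypto_acomp_lock_stream_bh() sets its bit in
 * stream_want and kicks this worker; an allocation failure leaves the
 * bit set so the allocation is retried on a later request.
 */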
static void acomp_stream_workfn(struct work_struct *work)
{
	struct crypto_acomp_streams *s =
		container_of(work, struct crypto_acomp_streams, stream_work);
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu;

	for_each_cpu(cpu, &s->stream_want) {
		struct crypto_acomp_stream *ps;
		void *ctx;

		ps = per_cpu_ptr(streams, cpu);
		if (ps->ctx)
			continue;

		ctx = s->alloc_ctx();
		if (IS_ERR(ctx))
			break;

		spin_lock_bh(&ps->lock);
		ps->ctx = ctx;
		spin_unlock_bh(&ps->lock);

		cpumask_clear_cpu(cpu, &s->stream_want);
	}
}

void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	void (*free_ctx)(void *);
	int i;

	s->streams = NULL;
	if (!streams)
		return;

	cancel_work_sync(&s->stream_work);
	free_ctx = s->free_ctx;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);

		if (!ps->ctx)
			continue;

		free_ctx(ps->ctx);
	}

	free_percpu(streams);
}
EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);

int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams;
	struct crypto_acomp_stream *ps;
	unsigned int i;
	void *ctx;

	if (s->streams)
		return 0;

	streams = alloc_percpu(struct crypto_acomp_stream);
	if (!streams)
		return -ENOMEM;

	ctx = s->alloc_ctx();
	if (IS_ERR(ctx)) {
		free_percpu(streams);
		return PTR_ERR(ctx);
	}

	i = cpumask_first(cpu_possible_mask);
	ps = per_cpu_ptr(streams, i);
	ps->ctx = ctx;

	for_each_possible_cpu(i) {
		ps = per_cpu_ptr(streams, i);
		spin_lock_init(&ps->lock);
	}

	s->streams = streams;

	INIT_WORK(&s->stream_work, acomp_stream_workfn);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);

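/*
 * Return a locked stream for the current CPU, falling back to the first
 * possible CPU's stream (pre-allocated in crypto_acomp_alloc_streams())
 * while the local context is still missing.  The locking is deliberately
 * asymmetric: BH stays disabled from the initial spin_lock_bh() until
 * the caller releases the returned stream with
 * crypto_acomp_unlock_stream_bh().
 */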
struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh(
	struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu = raw_smp_processor_id();
	struct crypto_acomp_stream *ps;

	ps = per_cpu_ptr(streams, cpu);
	spin_lock_bh(&ps->lock);
	if (likely(ps->ctx))
		return ps;
	spin_unlock(&ps->lock);

	cpumask_set_cpu(cpu, &s->stream_want);
	schedule_work(&s->stream_work);

	ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
	spin_lock(&ps->lock);
	return ps;
}
EXPORT_SYMBOL_GPL(_crypto_acomp_lock_stream_bh);

void acomp_walk_done_src(struct acomp_walk *walk, int used)
{
	walk->slen -= used;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
		scatterwalk_advance(&walk->in, used);
	else
		scatterwalk_done_src(&walk->in, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_src);

void acomp_walk_done_dst(struct acomp_walk *walk, int used)
{
	walk->dlen -= used;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR))
		scatterwalk_advance(&walk->out, used);
	else
		scatterwalk_done_dst(&walk->out, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_dst);

int acomp_walk_next_src(struct acomp_walk *walk)
{
	unsigned int slen = walk->slen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
					   walk->in.offset);
		return min(slen, max);
	}

	return slen ? scatterwalk_next(&walk->in, slen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_src);

int acomp_walk_next_dst(struct acomp_walk *walk)
{
	unsigned int dlen = walk->dlen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
					    walk->out.offset);
		return min(dlen, max);
	}

	return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_dst);

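/*
 * Illustrative walk loop for a driver (hypothetical sketch; the
 * processing step and the dused variable are assumptions made up for
 * the example):
 *
 *	struct acomp_walk walk;
 *	int ret, scur, dcur, dused;
 *
 *	ret = acomp_walk_virt(&walk, req, false);
 *	if (ret)
 *		return ret;
 *	while ((scur = acomp_walk_next_src(&walk)) != 0) {
 *		dcur = acomp_walk_next_dst(&walk);
 *		if (!dcur)
 *			return -ENOSPC;
 *		... consume scur bytes at walk.in.__addr, producing
 *		    dused bytes at walk.out.__addr ...
 *		acomp_walk_done_src(&walk, scur);
 *		acomp_walk_done_dst(&walk, dused);
 *	}
 */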
int acomp_walk_virt(struct acomp_walk *__restrict walk,
		    struct acomp_req *__restrict req, bool atomic)
{
	struct scatterlist *src = req->src;
	struct scatterlist *dst = req->dst;

	walk->slen = req->slen;
	walk->dlen = req->dlen;

	if (!walk->slen || !walk->dlen)
		return -EINVAL;

	walk->flags = 0;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags |= ACOMP_WALK_SLEEP;
	if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
		walk->flags |= ACOMP_WALK_SRC_LINEAR;
	if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
		walk->flags |= ACOMP_WALK_DST_LINEAR;

	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.sg = (void *)req->svirt;
		walk->in.offset = 0;
	} else
		scatterwalk_start(&walk->in, src);
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.sg = (void *)req->dvirt;
		walk->out.offset = 0;
	} else
		scatterwalk_start(&walk->out, dst);

	return 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_virt);

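/*
 * When the original request's src/dst pointed at its own embedded chain
 * scatterlists, the clone's pointers must be redirected to the clone's
 * copies rather than left aliasing the old request.
 */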
struct acomp_req *acomp_request_clone(struct acomp_req *req,
				      size_t total, gfp_t gfp)
{
	struct acomp_req *nreq;

	nreq = container_of(crypto_request_clone(&req->base, total, gfp),
			    struct acomp_req, base);
	if (nreq == req)
		return req;

	if (req->src == &req->chain.ssg)
		nreq->src = &nreq->chain.ssg;
	if (req->dst == &req->chain.dsg)
		nreq->dst = &nreq->chain.dsg;
	return nreq;
}
EXPORT_SYMBOL_GPL(acomp_request_clone);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");