// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

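/*
 * Walk state flags: whether the walk may sleep, and whether the source
 * and destination are linear (virtual-address) buffers rather than
 * scatterlists.
 */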
enum {
	ACOMP_WALK_SLEEP = 1 << 0,
	ACOMP_WALK_SRC_LINEAR = 1 << 1,
	ACOMP_WALK_DST_LINEAR = 1 << 2,
};

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void __maybe_unused crypto_acomp_show(struct seq_file *m,
					     struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (alg->exit)
		alg->exit(acomp);

	if (acomp_is_async(acomp))
		crypto_free_acomp(crypto_acomp_fb(acomp));
}

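/*
 * Set up a new acomp transform.  scomp-backed algorithms are diverted to
 * the scomp wrapper.  Async algorithms additionally allocate a synchronous
 * instance of the same algorithm as a fallback, used for requests whose
 * buffers must not be DMA-mapped; the fallback's request size must fit
 * the on-stack fallback request (MAX_SYNC_COMP_REQSIZE).
 */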
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);
	struct crypto_acomp *fb = NULL;
	int err;

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	if (acomp_is_async(acomp)) {
		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		err = -EINVAL;
		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
			goto out_free_fb;

		tfm->fb = crypto_acomp_tfm(fb);
	}

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->base.cra_reqsize;

	acomp->base.exit = crypto_acomp_exit_tfm;

	if (!alg->init)
		return 0;

	err = alg->init(acomp);
	if (err)
		goto out_free_fb;

	return 0;

out_free_fb:
	crypto_free_acomp(fb);
	return err;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
	.algsize = offsetof(struct acomp_alg, base),
};

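/*
 * Allocate an acomp transform by algorithm name.  A minimal usage sketch
 * (assumes a registered "deflate" acomp and caller-provided scatterlists;
 * error handling trimmed):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_acomp_compress(req);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */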
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);

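/*
 * Stash the caller's completion callback and context in the request's
 * chain state so the request can be rerouted through
 * acomp_reqchain_done(), then restored once processing finishes.
 */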
static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct acomp_req_chain *state = &req->chain;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = req;
}

static void acomp_restore_req(struct acomp_req *req)
{
	req->base.complete = req->chain.compl;
	req->base.data = req->chain.data;
}

static void acomp_reqchain_virt(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
		acomp_request_set_src_dma(req, state->src, slen);
	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
		acomp_request_set_dst_dma(req, state->dst, dlen);
}

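/*
 * If the request carries virtual addresses, wrap them in the single-entry
 * scatterlists embedded in the request's chain state so that SG-only
 * drivers can process them.  acomp_reqchain_virt() undoes this on
 * completion.
 */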
static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
					  CRYPTO_ACOMP_REQ_DST_VIRT);

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}

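/*
 * Requests whose buffers must not be DMA-mapped are bounced to the
 * synchronous fallback transform via an on-stack fallback request.
 */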
static int acomp_do_nondma(struct acomp_req *req, bool comp)
{
	ACOMP_FBREQ_ON_STACK(fbreq, req);
	int err;

	if (comp)
		err = crypto_acomp_compress(fbreq);
	else
		err = crypto_acomp_decompress(fbreq);

	req->dlen = fbreq->dlen;
	return err;
}

static int acomp_do_one_req(struct acomp_req *req, bool comp)
{
	if (acomp_request_isnondma(req))
		return acomp_do_nondma(req, comp);

	acomp_virt_to_sg(req);
	return comp ? crypto_acomp_reqtfm(req)->compress(req) :
		      crypto_acomp_reqtfm(req)->decompress(req);
}

static int acomp_reqchain_finish(struct acomp_req *req, int err)
{
	acomp_reqchain_virt(req);
	acomp_restore_req(req);
	return err;
}

static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req *req = data;
	crypto_completion_t compl;

	compl = req->chain.compl;
	data = req->chain.data;

	if (err == -EINPROGRESS)
		goto notify;

	err = acomp_reqchain_finish(req, err);

notify:
	compl(data, err);
}

static int acomp_do_req_chain(struct acomp_req *req, bool comp)
{
	int err;

	acomp_save_req(req, acomp_reqchain_done);

	err = acomp_do_one_req(req, comp);
	if (err == -EBUSY || err == -EINPROGRESS)
		return err;

	return acomp_reqchain_finish(req, err);
}

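/*
 * Main entry points.  On-stack requests cannot complete asynchronously,
 * so they are rejected with -EAGAIN on async transforms.  Requests the
 * driver can take directly (it understands virtual addresses, or the
 * request already uses SG lists) go straight through; everything else is
 * routed via the request-chain helpers above.
 */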
int crypto_acomp_compress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
		return crypto_acomp_reqtfm(req)->compress(req);
	return acomp_do_req_chain(req, true);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
		return crypto_acomp_reqtfm(req)->decompress(req);
	return acomp_do_req_chain(req, false);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret) {
			crypto_unregister_acomps(algs, i);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

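/*
 * Worker that allocates per-CPU stream contexts on demand.  CPUs that
 * found no context in _crypto_acomp_lock_stream_bh() are flagged in
 * stream_want and serviced here.
 */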
static void acomp_stream_workfn(struct work_struct *work)
{
	struct crypto_acomp_streams *s =
		container_of(work, struct crypto_acomp_streams, stream_work);
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu;

	for_each_cpu(cpu, &s->stream_want) {
		struct crypto_acomp_stream *ps;
		void *ctx;

		ps = per_cpu_ptr(streams, cpu);
		if (ps->ctx)
			continue;

		ctx = s->alloc_ctx();
		if (IS_ERR(ctx))
			break;

		spin_lock_bh(&ps->lock);
		ps->ctx = ctx;
		spin_unlock_bh(&ps->lock);

		cpumask_clear_cpu(cpu, &s->stream_want);
	}
}

void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	void (*free_ctx)(void *);
	int i;

	s->streams = NULL;
	if (!streams)
		return;

	cancel_work_sync(&s->stream_work);
	free_ctx = s->free_ctx;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);

		if (!ps->ctx)
			continue;

		free_ctx(ps->ctx);
	}

	free_percpu(streams);
}
EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);

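/*
 * Allocate the per-CPU stream array and one initial context on the first
 * possible CPU; the remaining contexts are populated lazily by the
 * worker.
 */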
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams;
	struct crypto_acomp_stream *ps;
	unsigned int i;
	void *ctx;

	if (s->streams)
		return 0;

	streams = alloc_percpu(struct crypto_acomp_stream);
	if (!streams)
		return -ENOMEM;

	ctx = s->alloc_ctx();
	if (IS_ERR(ctx)) {
		free_percpu(streams);
		return PTR_ERR(ctx);
	}

	i = cpumask_first(cpu_possible_mask);
	ps = per_cpu_ptr(streams, i);
	ps->ctx = ctx;

	for_each_possible_cpu(i) {
		ps = per_cpu_ptr(streams, i);
		spin_lock_init(&ps->lock);
	}

	s->streams = streams;

	INIT_WORK(&s->stream_work, acomp_stream_workfn);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);

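/*
 * Lock and return the current CPU's stream.  If its context has not been
 * allocated yet, flag the CPU for the worker and fall back to the first
 * possible CPU's stream, which always has a context.  The lock is taken
 * with spin_lock_bh() and, on the fallback path, handed over with plain
 * spin_lock() so that bottom halves stay disabled until the caller
 * unlocks.
 */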
struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh(
	struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream __percpu *streams = s->streams;
	int cpu = raw_smp_processor_id();
	struct crypto_acomp_stream *ps;

	ps = per_cpu_ptr(streams, cpu);
	spin_lock_bh(&ps->lock);
	if (likely(ps->ctx))
		return ps;
	spin_unlock(&ps->lock);

	cpumask_set_cpu(cpu, &s->stream_want);
	schedule_work(&s->stream_work);

	ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
	spin_lock(&ps->lock);
	return ps;
}
EXPORT_SYMBOL_GPL(_crypto_acomp_lock_stream_bh);

void acomp_walk_done_src(struct acomp_walk *walk, int used)
{
	walk->slen -= used;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
		scatterwalk_advance(&walk->in, used);
	else
		scatterwalk_done_src(&walk->in, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_src);

void acomp_walk_done_dst(struct acomp_walk *walk, int used)
{
	walk->dlen -= used;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR))
		scatterwalk_advance(&walk->out, used);
	else
		scatterwalk_done_dst(&walk->out, used);

	if ((walk->flags & ACOMP_WALK_SLEEP))
		cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_dst);

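/*
 * Return the length of the next contiguous chunk of source (or, below,
 * destination) data, mapping its address into the walk.  When the kernel
 * is not preemptible, sleeping walks are capped at PAGE_SIZE per step so
 * that acomp_walk_done_*() can reschedule between chunks.
 */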
int acomp_walk_next_src(struct acomp_walk *walk)
{
	unsigned int slen = walk->slen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
					   walk->in.offset);
		return min(slen, max);
	}

	return slen ? scatterwalk_next(&walk->in, slen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_src);

int acomp_walk_next_dst(struct acomp_walk *walk)
{
	unsigned int dlen = walk->dlen;
	unsigned int max = UINT_MAX;

	if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
		max = PAGE_SIZE;
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
					    walk->out.offset);
		return min(dlen, max);
	}

	return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_dst);

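/*
 * Initialise a walk over the request's source and destination, each of
 * which may independently be an SG list or a linear virtual buffer (the
 * latter is carried through the scatter_walk's sg pointer).
 */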
int acomp_walk_virt(struct acomp_walk *__restrict walk,
		    struct acomp_req *__restrict req, bool atomic)
{
	struct scatterlist *src = req->src;
	struct scatterlist *dst = req->dst;

	walk->slen = req->slen;
	walk->dlen = req->dlen;

	if (!walk->slen || !walk->dlen)
		return -EINVAL;

	walk->flags = 0;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags |= ACOMP_WALK_SLEEP;
	if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
		walk->flags |= ACOMP_WALK_SRC_LINEAR;
	if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
		walk->flags |= ACOMP_WALK_DST_LINEAR;

	if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
		walk->in.sg = (void *)req->svirt;
		walk->in.offset = 0;
	} else
		scatterwalk_start(&walk->in, src);
	if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
		walk->out.sg = (void *)req->dvirt;
		walk->out.offset = 0;
	} else
		scatterwalk_start(&walk->out, dst);

	return 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_virt);

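/*
 * Clone a request, fixing up any src/dst pointers that referred to the
 * single-entry scatterlists embedded in the old request's chain state so
 * they point at the clone's own copies.
 */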
struct acomp_req *acomp_request_clone(struct acomp_req *req,
				      size_t total, gfp_t gfp)
{
	struct acomp_req *nreq;

	nreq = container_of(crypto_request_clone(&req->base, total, gfp),
			    struct acomp_req, base);
	if (nreq == req)
		return req;

	if (req->src == &req->chain.ssg)
		nreq->src = &nreq->chain.ssg;
	if (req->dst == &req->chain.dsg)
		nreq->dst = &nreq->chain.dsg;
	return nreq;
}
EXPORT_SYMBOL_GPL(acomp_request_clone);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");