xref: /linux/crypto/acompress.c (revision 39fc22a8e53e96392f9b2c840e386272affbe6ba)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "compress.h"
struct crypto_scomp;

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

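/*
 * An acomp algorithm shows up in /proc/crypto roughly as below
 * (illustrative entry; the common fields are printed by the framework,
 * the hook below only contributes the "type" line):
 *
 *	name         : deflate
 *	driver       : deflate-scomp
 *	module       : kernel
 *	type         : acomp
 */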
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (alg->exit)
		alg->exit(acomp);

	if (acomp_is_async(acomp))
		crypto_free_acomp(acomp->fb);
}

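/*
 * Transform setup takes one of three paths: a synchronous scomp algorithm
 * is wrapped via crypto_init_scomp_ops_async(); a true asynchronous acomp
 * additionally allocates a synchronous fallback instance (acomp->fb) of
 * the same algorithm, used when a request must be handled synchronously
 * (see acomp_do_nondma()); a synchronous acomp needs neither.
 */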
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);
	struct crypto_acomp *fb = NULL;
	int err;

	acomp->fb = acomp;

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	if (acomp_is_async(acomp)) {
		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		err = -EINVAL;
		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
			goto out_free_fb;

		acomp->fb = fb;
	}

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->reqsize;

	acomp->base.exit = crypto_acomp_exit_tfm;

	if (!alg->init)
		return 0;

	err = alg->init(acomp);
	if (err)
		goto out_free_fb;

	return 0;

out_free_fb:
	crypto_free_acomp(fb);
	return err;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
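
/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * allocates a transform, fills in a request and waits for completion.
 * "deflate" is just an example algorithm name; error handling is abridged.
 *
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	struct scatterlist src, dst;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = acomp_request_alloc(tfm);
 *	sg_init_one(&src, src_buf, src_len);
 *	sg_init_one(&dst, dst_buf, dst_len);
 *	acomp_request_set_params(req, &src, &dst, src_len, dst_len);
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *
 *	(on success, req->dlen holds the compressed length)
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */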

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);

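/*
 * While a chain is processed, the framework borrows req->base.complete and
 * req->base.data of the first request to carry its own completion handler
 * and the struct acomp_req_chain bookkeeping.  acomp_save_req() stashes
 * the caller's values; acomp_restore_req() puts them back before the
 * final completion is signalled.
 */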
static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct acomp_req_chain *state = &req->chain;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;
}

static void acomp_restore_req(struct acomp_req *req)
{
	struct acomp_req_chain *state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
}

static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
{
	struct acomp_req *req = state->cur;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	req->base.err = err;
	state = &req->chain;

	if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
		acomp_request_set_src_dma(req, state->src, slen);
	else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)
		acomp_request_set_src_folio(req, state->sfolio, state->soff, slen);
	if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
		acomp_request_set_dst_dma(req, state->dst, dlen);
	else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO)
		acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen);
}

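/*
 * Requests carrying virtual addresses or folios are converted to
 * single-entry scatterlists before being handed to the algorithm.  The
 * saved flags let acomp_reqchain_virt() restore the original buffer
 * description once the operation completes.  A folio's memory is
 * physically contiguous, so a single sg entry starting in the page that
 * contains the offset can cover the whole span.
 */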
static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
					  CRYPTO_ACOMP_REQ_DST_VIRT |
					  CRYPTO_ACOMP_REQ_SRC_FOLIO |
					  CRYPTO_ACOMP_REQ_DST_FOLIO);

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	} else if (acomp_request_src_isfolio(req)) {
		struct folio *folio = req->sfolio;
		unsigned int slen = req->slen;
		size_t off = req->soff;

		state->sfolio = folio;
		state->soff = off;
		sg_init_table(&state->ssg, 1);
		sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE),
			    slen, off % PAGE_SIZE);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	} else if (acomp_request_dst_isfolio(req)) {
		struct folio *folio = req->dfolio;
		unsigned int dlen = req->dlen;
		size_t off = req->doff;

		state->dfolio = folio;
		state->doff = off;
		sg_init_table(&state->dsg, 1);
		sg_set_page(&state->dsg, folio_page(folio, off / PAGE_SIZE),
			    dlen, off % PAGE_SIZE);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}

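/*
 * Requests flagged NONDMA must not be fed to the (possibly
 * hardware-backed) algorithm directly, so they are re-issued against the
 * synchronous fallback transform set up in crypto_acomp_init_tfm(), via a
 * fallback request allocated on the stack.
 */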
static int acomp_do_nondma(struct acomp_req_chain *state,
			   struct acomp_req *req)
{
	u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT |
		   CRYPTO_ACOMP_REQ_SRC_NONDMA |
		   CRYPTO_ACOMP_REQ_DST_VIRT |
		   CRYPTO_ACOMP_REQ_DST_NONDMA;
	ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
	int err;

	acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL);
	fbreq->base.flags &= ~keep;
	fbreq->base.flags |= req->base.flags & keep;
	fbreq->src = req->src;
	fbreq->dst = req->dst;
	fbreq->slen = req->slen;
	fbreq->dlen = req->dlen;

	if (state->op == crypto_acomp_reqtfm(req)->compress)
		err = crypto_acomp_compress(fbreq);
	else
		err = crypto_acomp_decompress(fbreq);

	req->dlen = fbreq->dlen;
	return err;
}

static int acomp_do_one_req(struct acomp_req_chain *state,
			    struct acomp_req *req)
{
	state->cur = req;

	if (acomp_request_isnondma(req))
		return acomp_do_nondma(state, req);

	acomp_virt_to_sg(req);
	return state->op(req);
}

static int acomp_reqchain_finish(struct acomp_req *req0, int err, u32 mask)
{
	struct acomp_req_chain *state = req0->base.data;
	struct acomp_req *req = state->cur;
	struct acomp_req *n;

	acomp_reqchain_virt(state, err);

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = acomp_reqchain_done;
		req->base.data = state;

		err = acomp_do_one_req(state, req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head))
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		acomp_reqchain_virt(state, err);
		list_add_tail(&req->base.list, &req0->base.list);
	}

	acomp_restore_req(req0);

out:
	return err;
}

static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req_chain *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head))
			return;
		goto notify;
	}

	err = acomp_reqchain_finish(state->req0, err,
				    CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

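/*
 * Common entry point for compression and decompression.  Algorithms that
 * natively handle request chaining, as well as plain unchained
 * scatterlist requests, go straight to the algorithm's handler;
 * everything else is taken off the chain and processed one request at a
 * time via acomp_do_one_req().
 */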
static int acomp_do_req_chain(struct acomp_req *req,
			      int (*op)(struct acomp_req *req))
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state;
	int err;

	if (crypto_acomp_req_chain(tfm) ||
	    (!acomp_request_chained(req) && acomp_request_issg(req)))
		return op(req);

	acomp_save_req(req, acomp_reqchain_done);
	state = req->base.data;

	state->op = op;
	state->src = NULL;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	err = acomp_do_one_req(state, req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return acomp_reqchain_finish(req, err, ~0);
}

int crypto_acomp_compress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);
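
/*
 * Registration sketch (illustrative, not part of this file): the shape of
 * a minimal driver-side acomp algorithm.  The names my_compress,
 * my_decompress and "lzexample" are hypothetical placeholders.
 *
 *	static struct acomp_alg my_alg = {
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name	= "lzexample",
 *			.cra_driver_name = "lzexample-mydev",
 *			.cra_priority	= 300,
 *			.cra_module	= THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init my_mod_init(void)
 *	{
 *		return crypto_register_acomp(&my_alg);
 *	}
 *
 *	static void __exit my_mod_exit(void)
 *	{
 *		crypto_unregister_acomp(&my_alg);
 *	}
 */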

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");