xref: /linux/crypto/acompress.c (revision dfd28c89fa91d92b7790ec4d1e8d8d5b4e8f1b19)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (alg->exit)
		alg->exit(acomp);

	if (acomp_is_async(acomp))
		crypto_free_acomp(acomp->fb);
}

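/*
 * Set up a newly allocated acomp transform.  If the algorithm is really
 * an scomp (synchronous compression) implementation, hand off to the
 * scomp wrapper.  For genuinely asynchronous implementations, allocate
 * a synchronous fallback transform of the same algorithm (acomp->fb);
 * for a synchronous one, acomp->fb simply points back at the transform
 * itself.
 */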
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);
	struct crypto_acomp *fb = NULL;
	int err;

	acomp->fb = acomp;

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	if (acomp_is_async(acomp)) {
		fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		err = -EINVAL;
		if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
			goto out_free_fb;

		acomp->fb = fb;
	}

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->reqsize;

	acomp->base.exit = crypto_acomp_exit_tfm;

	if (!alg->init)
		return 0;

	err = alg->init(acomp);
	if (err)
		goto out_free_fb;

	return 0;

out_free_fb:
	crypto_free_acomp(fb);
	return err;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
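
/*
 * A minimal usage sketch (not part of this file): allocate a transform,
 * build a request over scatterlists and wait for completion.  The
 * algorithm name "deflate" and the src_buf/dst_buf buffers are
 * illustrative assumptions; error handling is abbreviated.
 *
 *	struct scatterlist sg_src, sg_dst;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = acomp_request_alloc(tfm);
 *	if (!req) {
 *		crypto_free_acomp(tfm);
 *		return -ENOMEM;
 *	}
 *
 *	sg_init_one(&sg_src, src_buf, src_len);
 *	sg_init_one(&sg_dst, dst_buf, dst_len);
 *	acomp_request_set_params(req, &sg_src, &sg_dst, src_len, dst_len);
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *	(on success, req->dlen holds the compressed length)
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */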

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);

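/*
 * Return true if any request in the chain uses non-DMA-capable virtual
 * addresses; the chain-walking fallback below cannot handle those.
 */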
static bool acomp_request_has_nondma(struct acomp_req *req)
{
	struct acomp_req *r2;

	if (acomp_request_isnondma(req))
		return true;

	list_for_each_entry(r2, &req->base.list, base.list)
		if (acomp_request_isnondma(r2))
			return true;

	return false;
}

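/*
 * For an async transform, park the caller's completion callback and
 * data in the chain state and substitute acomp_reqchain_done(), so the
 * core regains control when each chained request completes.  The
 * original callback is reinstated by acomp_restore_req().
 */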
static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;

	if (!acomp_is_async(tfm))
		return;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;
}

static void acomp_restore_req(struct acomp_req_chain *state)
{
	struct acomp_req *req = state->req0;
	struct crypto_acomp *tfm;

	tfm = crypto_acomp_reqtfm(req);
	if (!acomp_is_async(tfm))
		return;

	req->base.complete = state->compl;
	req->base.data = state->data;
}

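/*
 * Undo acomp_virt_to_sg() once a request has finished: record the
 * result in req->base.err and point the request back at the original
 * virtual addresses that were temporarily replaced by scatterlists.
 */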
static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
{
	struct acomp_req *req = state->cur;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	req->base.err = err;
	state = &req->chain;

	if (state->src)
		acomp_request_set_src_dma(req, state->src, slen);
	if (state->dst)
		acomp_request_set_dst_dma(req, state->dst, dlen);
	state->src = NULL;
	state->dst = NULL;
}

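/*
 * If a request carries virtual addresses, wrap each one in a
 * single-entry scatterlist kept in the chain state so that a driver
 * that only understands scatterlists can process it.  The original
 * pointers are saved for acomp_reqchain_virt() to restore.
 */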
static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}

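/*
 * Complete the request that just finished, then walk the remaining
 * chained requests, issuing each in turn.  A request that returns
 * -EINPROGRESS or -EBUSY suspends the walk; it resumes from
 * acomp_reqchain_done() when that request completes.  Completed
 * requests are moved back onto the list of the first request (req0).
 */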
static int acomp_reqchain_finish(struct acomp_req_chain *state,
				 int err, u32 mask)
{
	struct acomp_req *req0 = state->req0;
	struct acomp_req *req = state->cur;
	struct acomp_req *n;

	acomp_reqchain_virt(state, err);

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = acomp_reqchain_done;
		req->base.data = state;
		state->cur = req;

		acomp_virt_to_sg(req);
		err = state->op(req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head))
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		acomp_reqchain_virt(state, err);
		list_add_tail(&req->base.list, &req0->base.list);
	}

	acomp_restore_req(state);

out:
	return err;
}

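/*
 * Completion callback installed on chained requests.  Resume the chain
 * walk when an in-flight request completes, and invoke the caller's
 * original completion function once the whole chain is done (or to
 * signal -EINPROGRESS for a request leaving the backlog).
 */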
static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req_chain *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head))
			return;
		goto notify;
	}

	err = acomp_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

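/*
 * Central dispatch for compress/decompress.  Drivers that natively
 * support request chaining (crypto_acomp_req_chain()), and plain
 * single scatterlist-based requests, are passed straight through.
 * Everything else (chained and/or virtual-address requests) is
 * emulated here by issuing the requests one at a time via
 * acomp_reqchain_finish().
 */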
static int acomp_do_req_chain(struct acomp_req *req,
			      int (*op)(struct acomp_req *req))
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;
	int err;

	if (crypto_acomp_req_chain(tfm) ||
	    (!acomp_request_chained(req) && !acomp_request_isvirt(req)))
		return op(req);

	/*
	 * There are currently no in-kernel users that do this.  If such
	 * users ever come into being, a fall-back path could be added.
	 */
	if (acomp_request_has_nondma(req))
		return -EINVAL;

	if (acomp_is_async(tfm)) {
		acomp_save_req(req, acomp_reqchain_done);
		state = req->base.data;
	}

	state->op = op;
	state->cur = req;
	state->src = NULL;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	acomp_virt_to_sg(req);
	err = op(req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return acomp_reqchain_finish(state, err, ~0);
}

int crypto_acomp_compress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
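
/*
 * A minimal sketch of a virtual-address request (not part of this
 * file): DMA-capable kernel pointers can be set directly instead of
 * scatterlists, and acomp_do_req_chain() wraps them in single-entry
 * scatterlists for drivers that need them.  src_buf/dst_buf, their
 * lengths and the wait object are illustrative assumptions carried
 * over from the earlier sketch.
 *
 *	acomp_request_set_src_dma(req, src_buf, src_len);
 *	acomp_request_set_dst_dma(req, dst_buf, dst_len);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 */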

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);
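
/*
 * A hypothetical registration sketch (not part of this file): a driver
 * fills in a struct acomp_alg and registers it.  All names below
 * (my_compress, my_decompress, "deflate-mydev") are illustrative
 * assumptions, not a real driver.
 *
 *	static struct acomp_alg my_acomp = {
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name	 = "deflate",
 *			.cra_driver_name = "deflate-mydev",
 *			.cra_priority	 = 400,
 *			.cra_flags	 = CRYPTO_ALG_ASYNC,
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	err = crypto_register_acomp(&my_acomp);
 */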

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");