// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}

static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	alg->exit(acomp);
}

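/*
 * A tfm whose algorithm does not use the acomp crypto_type is backed by a
 * synchronous scomp algorithm; crypto_init_scomp_ops_async() wraps it so it
 * can be driven through the asynchronous interface.  Native acomp algorithms
 * simply have their ops and request size copied into the tfm.
 */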
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->reqsize = alg->reqsize;

	if (alg->exit)
		acomp->base.exit = crypto_acomp_exit_tfm;

	if (alg->init)
		return alg->init(acomp);

	return 0;
}

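/*
 * scomp-backed instances need room for an extra struct crypto_scomp pointer
 * on top of the algorithm's own context size.
 */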
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
};

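/*
 * Allocate an acomp transform by algorithm name.  A minimal usage sketch,
 * with "deflate" only as an example name and error handling abbreviated:
 *
 *	struct crypto_acomp *tfm;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_acomp(tfm);
 */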
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);

static bool acomp_request_has_nondma(struct acomp_req *req)
{
	struct acomp_req *r2;

	if (acomp_request_isnondma(req))
		return true;

	list_for_each_entry(r2, &req->base.list, base.list)
		if (acomp_request_isnondma(r2))
			return true;

	return false;
}

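/*
 * On an asynchronous tfm the caller's completion callback and data are saved
 * in the chain state so that the chaining code can interpose its own
 * completion handler; acomp_restore_req() undoes this before the final
 * completion is reported.
 */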
static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;

	if (!acomp_is_async(tfm))
		return;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;
}

static void acomp_restore_req(struct acomp_req_chain *state)
{
	struct acomp_req *req = state->req0;
	struct crypto_acomp *tfm;

	tfm = crypto_acomp_reqtfm(req);
	if (!acomp_is_async(tfm))
		return;

	req->base.complete = state->compl;
	req->base.data = state->data;
}

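/*
 * Record the per-request result and switch a request that was converted by
 * acomp_virt_to_sg() back from the temporary scatterlists to its original
 * virtual addresses.
 */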
static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
{
	struct acomp_req *req = state->cur;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	req->base.err = err;
	state = &req->chain;

	if (state->src)
		acomp_request_set_src_dma(req, state->src, slen);
	if (state->dst)
		acomp_request_set_dst_dma(req, state->dst, dlen);
	state->src = NULL;
	state->dst = NULL;
}

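/*
 * Requests that carry virtual addresses are rewritten to use single-entry
 * scatterlists so that drivers which only handle SG lists can process them.
 * The original pointers are stashed in the chain state for
 * acomp_reqchain_virt() to restore afterwards.
 */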
static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}

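/*
 * Walk the remaining requests on the chain, issuing each one through
 * state->op.  A request that goes asynchronous (-EINPROGRESS or -EBUSY)
 * leaves the rest of the chain to be continued from acomp_reqchain_done().
 * Completed requests are moved back onto the list of the first request.
 */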
static int acomp_reqchain_finish(struct acomp_req_chain *state,
				 int err, u32 mask)
{
	struct acomp_req *req0 = state->req0;
	struct acomp_req *req = state->cur;
	struct acomp_req *n;

	acomp_reqchain_virt(state, err);

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = acomp_reqchain_done;
		req->base.data = state;
		state->cur = req;

		acomp_virt_to_sg(req);
		err = state->op(req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head))
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		acomp_reqchain_virt(state, err);
		list_add_tail(&req->base.list, &req0->base.list);
	}

	acomp_restore_req(state);

out:
	return err;
}

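/*
 * Completion callback installed by acomp_save_req().  It continues the chain
 * once the driver has finished the current request and, when the whole chain
 * is done, reports completion (or a backlog notification) to the caller's
 * original completion function.
 */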
static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req_chain *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head))
			return;
		goto notify;
	}

	err = acomp_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

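/*
 * Entry point for compress/decompress.  Algorithms that handle request
 * chaining and virtual addresses natively, as well as plain single
 * scatterlist requests, are passed straight through; everything else is
 * emulated here by converting virtual addresses to scatterlists and walking
 * the chain one request at a time.
 */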
static int acomp_do_req_chain(struct acomp_req *req,
			      int (*op)(struct acomp_req *req))
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;
	int err;

	if (crypto_acomp_req_chain(tfm) ||
	    (!acomp_request_chained(req) && !acomp_request_isvirt(req)))
		return op(req);

	/*
	 * There are no in-kernel users that do this.  If such users
	 * ever come into being then we could add a fall-back path.
	 */
	if (acomp_request_has_nondma(req))
		return -EINVAL;

	if (acomp_is_async(tfm)) {
		acomp_save_req(req, acomp_reqchain_done);
		state = req->base.data;
	}

	state->op = op;
	state->cur = req;
	state->src = NULL;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	acomp_virt_to_sg(req);
	err = op(req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return acomp_reqchain_finish(state, err, ~0);
}

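/*
 * A minimal, synchronous usage sketch for the compress/decompress entry
 * points below (src, dst, slen and dlen are caller-provided, error handling
 * is abbreviated):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct acomp_req *req;
 *	int err;
 *
 *	req = acomp_request_alloc(tfm);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	acomp_request_set_params(req, src, dst, slen, dlen);
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *	acomp_request_free(req);
 */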
int crypto_acomp_compress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);

void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

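/*
 * Register an array of acomp algorithms.  On failure the entries that were
 * already registered are unregistered again before the error is returned.
 */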
int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");