xref: /linux/crypto/acompress.c (revision da6f9bf40ac267b5c720694a817beea84fa40f77)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "compress.h"

struct crypto_scomp;

static const struct crypto_type crypto_acomp_type;

static void acomp_reqchain_done(void *data, int err);

static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct acomp_alg, calg.base);
}

static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}
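
/* Report this algorithm type to userspace via the CRYPTO_USER netlink interface. */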
static int __maybe_unused crypto_acomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	memset(&racomp, 0, sizeof(racomp));

	strscpy(racomp.type, "acomp", sizeof(racomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	alg->exit(acomp);
}
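
/*
 * Set up a new acomp transform.  Algorithms registered through the
 * synchronous scomp interface are wrapped via crypto_init_scomp_ops_async();
 * native acomp algorithms have their callbacks copied into the tfm and
 * their optional ->init() hook run.
 */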
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->dst_free = alg->dst_free;
	acomp->reqsize = alg->reqsize;

	if (alg->exit)
		acomp->base.exit = crypto_acomp_exit_tfm;

	if (alg->init)
		return alg->init(acomp);

	return 0;
}
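
/*
 * scomp algorithms exposed through the acomp interface need extra room
 * in the tfm context for a struct crypto_scomp pointer.
 */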
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_acomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node)
{
	return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
				node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
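
/*
 * Typical use of the acomp API by a caller (a minimal sketch, not taken
 * from this file; src_sg, dst_sg, slen and dlen are placeholders supplied
 * by the caller, and "deflate" is only an example algorithm name):
 *
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = acomp_request_alloc(tfm);
 *	if (!req) {
 *		crypto_free_acomp(tfm);
 *		return -ENOMEM;
 *	}
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */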

static bool acomp_request_has_nondma(struct acomp_req *req)
{
	struct acomp_req *r2;

	if (acomp_request_isnondma(req))
		return true;

	list_for_each_entry(r2, &req->base.list, base.list)
		if (acomp_request_isnondma(r2))
			return true;

	return false;
}
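
/*
 * For async tfms, stash the caller's completion callback and context so
 * that the chaining code can substitute its own; acomp_restore_req()
 * undoes this once the whole chain has been processed.
 */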
static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;

	if (!acomp_is_async(tfm))
		return;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;
}

static void acomp_restore_req(struct acomp_req_chain *state)
{
	struct acomp_req *req = state->req0;
	struct crypto_acomp *tfm;

	tfm = crypto_acomp_reqtfm(req);
	if (!acomp_is_async(tfm))
		return;

	req->base.complete = state->compl;
	req->base.data = state->data;
}
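
/*
 * Record the per-request result and switch the request back to the
 * caller's virtual-address buffers, undoing the temporary scatterlists
 * set up by acomp_virt_to_sg().
 */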
static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
{
	struct acomp_req *req = state->cur;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;

	req->base.err = err;
	state = &req->chain;

	if (state->src)
		acomp_request_set_src_dma(req, state->src, slen);
	if (state->dst)
		acomp_request_set_dst_dma(req, state->dst, dlen);
	state->src = NULL;
	state->dst = NULL;
}
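
/*
 * If the request uses virtual addresses, wrap them in single-entry
 * scatterlists so that implementations which only understand SG lists
 * can process the data.  The original pointers are remembered in the
 * chain state for acomp_reqchain_virt() to restore.
 */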
static void acomp_virt_to_sg(struct acomp_req *req)
{
	struct acomp_req_chain *state = &req->chain;

	if (acomp_request_src_isvirt(req)) {
		unsigned int slen = req->slen;
		const u8 *svirt = req->svirt;

		state->src = svirt;
		sg_init_one(&state->ssg, svirt, slen);
		acomp_request_set_src_sg(req, &state->ssg, slen);
	}

	if (acomp_request_dst_isvirt(req)) {
		unsigned int dlen = req->dlen;
		u8 *dvirt = req->dvirt;

		state->dst = dvirt;
		sg_init_one(&state->dsg, dvirt, dlen);
		acomp_request_set_dst_sg(req, &state->dsg, dlen);
	}
}
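
/*
 * Complete the current request and issue the remaining chained requests
 * one at a time, collecting finished ones back onto req0's list.
 * Returns -EINPROGRESS or -EBUSY if an asynchronous operation is still
 * pending, otherwise the result of the last request processed.
 */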
static int acomp_reqchain_finish(struct acomp_req_chain *state,
				 int err, u32 mask)
{
	struct acomp_req *req0 = state->req0;
	struct acomp_req *req = state->cur;
	struct acomp_req *n;

	acomp_reqchain_virt(state, err);

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = acomp_reqchain_done;
		req->base.data = state;
		state->cur = req;

		acomp_virt_to_sg(req);
		err = state->op(req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head))
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		acomp_reqchain_virt(state, err);
		list_add_tail(&req->base.list, &req0->base.list);
	}

	acomp_restore_req(state);

out:
	return err;
}
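
/*
 * Completion callback used while a chain is being processed
 * asynchronously: keep going until every request in the chain has been
 * issued, then invoke the caller's original completion function.
 */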
static void acomp_reqchain_done(void *data, int err)
{
	struct acomp_req_chain *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head))
			return;
		goto notify;
	}

	err = acomp_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}
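
/*
 * If the algorithm handles request chaining itself, or the request is
 * neither chained nor using virtual addresses, call the operation
 * directly.  Otherwise emulate chaining here: convert virtual buffers
 * to scatterlists and feed the requests to the algorithm one by one.
 */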
static int acomp_do_req_chain(struct acomp_req *req,
			      int (*op)(struct acomp_req *req))
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct acomp_req_chain *state = &req->chain;
	int err;

	if (crypto_acomp_req_chain(tfm) ||
	    (!acomp_request_chained(req) && !acomp_request_isvirt(req)))
		return op(req);

	/*
	 * There are no in-kernel users that do this.  If such users
	 * ever come into being then we could add a fall-back path.
	 */
	if (acomp_request_has_nondma(req))
		return -EINVAL;

	if (acomp_is_async(tfm)) {
		acomp_save_req(req, acomp_reqchain_done);
		state = req->base.data;
	}

	state->op = op;
	state->cur = req;
	state->src = NULL;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	acomp_virt_to_sg(req);
	err = op(req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return acomp_reqchain_finish(state, err, ~0);
}

int crypto_acomp_compress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);

int crypto_acomp_decompress(struct acomp_req *req)
{
	return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
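
/*
 * Clear the type bits in cra_flags; the registration helpers then OR in
 * the correct CRYPTO_ALG_TYPE_* value.
 */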
void comp_prepare_alg(struct comp_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_acomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);
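
/*
 * A driver would typically register its implementation roughly as below
 * (a minimal sketch, not taken from this file; my_acomp_init, my_compress,
 * my_decompress, struct my_ctx and the cra_* strings are hypothetical):
 *
 *	static struct acomp_alg my_alg = {
 *		.init		= my_acomp_init,
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name	 = "deflate",
 *			.cra_driver_name = "deflate-mydriver",
 *			.cra_priority	 = 400,
 *			.cra_module	 = THIS_MODULE,
 *			.cra_ctxsize	 = sizeof(struct my_ctx),
 *		},
 *	};
 *
 *	err = crypto_register_acomp(&my_alg);
 */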

void crypto_unregister_acomp(struct acomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");