xref: /linux/include/crypto/acompress.h (revision dfd28c89fa91d92b7790ec4d1e8d8d5b4e8f1b19)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Asynchronous Compression operations
4  *
5  * Copyright (c) 2016, Intel Corporation
6  * Authors: Weigang Li <weigang.li@intel.com>
7  *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
8  */
9 #ifndef _CRYPTO_ACOMP_H
10 #define _CRYPTO_ACOMP_H
11 
12 #include <linux/atomic.h>
13 #include <linux/args.h>
14 #include <linux/compiler_types.h>
15 #include <linux/container_of.h>
16 #include <linux/crypto.h>
17 #include <linux/err.h>
18 #include <linux/scatterlist.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock_types.h>
21 #include <linux/types.h>
22 
/* Set this bit if source is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_SRC_VIRT	0x00000002

/* Set this bit if the virtual address source cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_SRC_NONDMA	0x00000004

/* Set this bit if destination is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_DST_VIRT	0x00000008

/* Set this bit if the virtual address destination cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_DST_NONDMA	0x00000010

/* NOTE(review): presumably the largest dst buffer the API allocates on the
 * user's behalf -- confirm against the acomp core before relying on it. */
#define CRYPTO_ACOMP_DST_MAX		131072

/* Extra request context needed by the synchronous fallback tfm used for
 * on-stack requests (currently none). */
#define	MAX_SYNC_COMP_REQSIZE		0

/*
 * Declare and initialise an acomp request named @name for @tfm.  A heap
 * allocation with @gfp is attempted first; the on-stack storage declared
 * here is only used as a fallback (see acomp_request_on_stack_init()).
 */
#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \
        char __##name##_req[sizeof(struct acomp_req) + \
                            MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
        struct acomp_req *name = acomp_request_on_stack_init( \
                __##name##_req, (tfm), (gfp), false)
44 
struct acomp_req;

/*
 * struct acomp_req_chain - request chaining state
 *
 * Embedded in every acomp_req as @chain; private to the acomp API core,
 * do not use from drivers or callers.  Member roles below are inferred
 * from their names -- NOTE(review): confirm against the acomp core code.
 */
struct acomp_req_chain {
	struct list_head head;
	struct acomp_req *req0;		/* presumably the first request of the chain */
	struct acomp_req *cur;		/* presumably the request currently in flight */
	int (*op)(struct acomp_req *req); /* operation to apply (compress/decompress) */
	crypto_completion_t compl;	/* saved user completion callback */
	void *data;			/* saved user callback data */
	struct scatterlist ssg;		/* scratch SG, presumably wrapping a virtual src */
	struct scatterlist dsg;		/* scratch SG, presumably wrapping a virtual dst */
	const u8 *src;			/* saved virtual source address */
	u8 *dst;			/* saved virtual destination address */
};
59 
/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base:	Common attributes for asynchronous crypto requests
 * @src:	Source scatterlist (valid when CRYPTO_ACOMP_REQ_SRC_VIRT is clear)
 * @svirt:	Source virtual address (valid when CRYPTO_ACOMP_REQ_SRC_VIRT is set)
 * @dst:	Destination scatterlist (valid when CRYPTO_ACOMP_REQ_DST_VIRT is clear)
 * @dvirt:	Destination virtual address (valid when CRYPTO_ACOMP_REQ_DST_VIRT is set)
 * @slen:	Size of the input buffer
 * @dlen:	Size of the output buffer and number of bytes produced
 * @chain:	Private API code data, do not use
 * @__ctx:	Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	union {
		struct scatterlist *src;
		const u8 *svirt;
	};
	union {
		struct scatterlist *dst;
		u8 *dvirt;
	};
	unsigned int slen;
	unsigned int dlen;

	struct acomp_req_chain chain;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
88 
/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress:		Function performs a compress operation
 * @decompress:		Function performs a de-compress operation
 * @reqsize:		Context size for (de)compression requests
 * @fb:			Synchronous fallback tfm, used for on-stack requests
 *			(see acomp_request_on_stack_init())
 * @base:		Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	unsigned int reqsize;
	struct crypto_acomp *fb;
	struct crypto_tfm base;
};
106 
/* Per-stream (de)compression state; instantiated per CPU (see @stream). */
struct crypto_acomp_stream {
	spinlock_t lock;	/* presumably guards @ctx -- confirm in core */
	void *ctx;		/* algorithm-private stream context */
};

/*
 * Fields common to all (de)compression algorithm types.  Kept as a macro
 * so other algorithm structures can open-code the identical layout.
 */
#define COMP_ALG_COMMON {			\
	struct crypto_alg base;			\
	struct crypto_acomp_stream __percpu *stream;	\
}
struct comp_alg_common COMP_ALG_COMMON;
117 
/**
 * DOC: Asynchronous Compression API
 *
 * The Asynchronous Compression API is used with the algorithms of type
 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
 */

/**
 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * Allocate a handle for a compression algorithm. The returned struct
 * crypto_acomp is the handle that is required for any subsequent
 * API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask);
/**
 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 * @node:	specifies the NUMA node the ZIP hardware belongs to
 *
 * Allocate a handle for a compression algorithm. Drivers should try to use
 * (de)compressors on the specified NUMA node.
 * The returned struct crypto_acomp is the handle that is required for any
 * subsequent API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node);
159 
/* Return the generic crypto_tfm embedded in an acomp handle. */
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}
164 
/* Cast a generic crypto_alg to its containing comp_alg_common. */
static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}
170 
/* Cast a generic crypto_tfm back to its containing acomp handle. */
static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}
175 
/* Return the comp_alg_common of the algorithm backing @tfm. */
static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}
181 
/* Size of the driver-private context appended to each request (@__ctx). */
static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}
186 
/* Associate @req with the tfm handle that will process it. */
static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	req->base.tfm = crypto_acomp_tfm(tfm);
}
192 
/* True if the algorithm advertises CRYPTO_ALG_ASYNC, i.e. requests may
 * complete asynchronously via the request's completion callback. */
static inline bool acomp_is_async(struct crypto_acomp *tfm)
{
	return crypto_comp_alg_common(tfm)->base.cra_flags &
	       CRYPTO_ALG_ASYNC;
}
198 
/* Return the acomp handle that @req was bound to via acomp_request_set_tfm(). */
static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}
203 
/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}
215 
216 static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
217 {
218 	type &= ~CRYPTO_ALG_TYPE_MASK;
219 	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
220 	mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
221 
222 	return crypto_has_alg(alg_name, type, mask);
223 }
224 
/* Return the algorithm name (cra_name) of @tfm. */
static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
}
229 
/* Return the driver name (cra_driver_name) of @tfm. */
static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
}
234 
235 /**
236  * acomp_request_alloc() -- allocates asynchronous (de)compression request
237  *
238  * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
239  * @gfp:	gfp to pass to kzalloc (defaults to GFP_KERNEL)
240  *
241  * Return:	allocated handle in case of success or NULL in case of an error
242  */
243 static inline struct acomp_req *acomp_request_alloc_extra_noprof(
244 	struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
245 {
246 	struct acomp_req *req;
247 	size_t len;
248 
249 	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
250 	if (check_add_overflow(len, extra, &len))
251 		return NULL;
252 
253 	req = kzalloc_noprof(len, gfp);
254 	if (likely(req))
255 		acomp_request_set_tfm(req, tfm);
256 	return req;
257 }
/*
 * acomp_request_alloc(tfm[, gfp]): argument-count dispatch -- with one
 * argument GFP_KERNEL is used, with two the caller's gfp is passed on.
 */
#define acomp_request_alloc_noprof(tfm, ...) \
	CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
		tfm, ##__VA_ARGS__)
#define acomp_request_alloc_noprof_0(tfm) \
	acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
#define acomp_request_alloc_noprof_1(tfm, gfp) \
	acomp_request_alloc_extra_noprof(tfm, 0, gfp)
#define acomp_request_alloc(...)	alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))
266 
/**
 * acomp_request_alloc_extra() -- allocate acomp request with extra memory
 *
 * Allocates a request with @extra bytes of caller-usable memory placed
 * after the (aligned) request context; locate it with acomp_request_extra().
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @extra:	amount of extra memory
 * @gfp:	gfp to pass to kzalloc
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
#define acomp_request_alloc_extra(...)	alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))
277 
278 static inline void *acomp_request_extra(struct acomp_req *req)
279 {
280 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
281 	size_t len;
282 
283 	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
284 	return (void *)((char *)req + len);
285 }
286 
287 /**
288  * acomp_request_free() -- zeroize and free asynchronous (de)compression
289  *			   request as well as the output buffer if allocated
290  *			   inside the algorithm
291  *
292  * @req:	request to free
293  */
294 static inline void acomp_request_free(struct acomp_req *req)
295 {
296 	if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK))
297 		return;
298 	kfree_sensitive(req);
299 }
300 
301 /**
302  * acomp_request_set_callback() -- Sets an asynchronous callback
303  *
304  * Callback will be called when an asynchronous operation on a given
305  * request is finished.
306  *
307  * @req:	request that the callback will be set for
308  * @flgs:	specify for instance if the operation may backlog
309  * @cmlp:	callback which will be called
310  * @data:	private data used by the caller
311  */
312 static inline void acomp_request_set_callback(struct acomp_req *req,
313 					      u32 flgs,
314 					      crypto_completion_t cmpl,
315 					      void *data)
316 {
317 	u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA |
318 		   CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA |
319 		   CRYPTO_TFM_REQ_ON_STACK;
320 
321 	req->base.complete = cmpl;
322 	req->base.data = data;
323 	req->base.flags &= keep;
324 	req->base.flags |= flgs & ~keep;
325 
326 	crypto_reqchain_init(&req->base);
327 }
328 
329 /**
330  * acomp_request_set_params() -- Sets request parameters
331  *
332  * Sets parameters required by an acomp operation
333  *
334  * @req:	asynchronous compress request
335  * @src:	pointer to input buffer scatterlist
336  * @dst:	pointer to output buffer scatterlist. If this is NULL, the
337  *		acomp layer will allocate the output memory
338  * @slen:	size of the input buffer
339  * @dlen:	size of the output buffer. If dst is NULL, this can be used by
340  *		the user to specify the maximum amount of memory to allocate
341  */
342 static inline void acomp_request_set_params(struct acomp_req *req,
343 					    struct scatterlist *src,
344 					    struct scatterlist *dst,
345 					    unsigned int slen,
346 					    unsigned int dlen)
347 {
348 	req->src = src;
349 	req->dst = dst;
350 	req->slen = slen;
351 	req->dlen = dlen;
352 
353 	req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
354 			     CRYPTO_ACOMP_REQ_SRC_NONDMA |
355 			     CRYPTO_ACOMP_REQ_DST_VIRT |
356 			     CRYPTO_ACOMP_REQ_DST_NONDMA);
357 }
358 
359 /**
360  * acomp_request_set_src_sg() -- Sets source scatterlist
361  *
362  * Sets source scatterlist required by an acomp operation.
363  *
364  * @req:	asynchronous compress request
365  * @src:	pointer to input buffer scatterlist
366  * @slen:	size of the input buffer
367  */
368 static inline void acomp_request_set_src_sg(struct acomp_req *req,
369 					    struct scatterlist *src,
370 					    unsigned int slen)
371 {
372 	req->src = src;
373 	req->slen = slen;
374 
375 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
376 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
377 }
378 
379 /**
380  * acomp_request_set_src_dma() -- Sets DMA source virtual address
381  *
382  * Sets source virtual address required by an acomp operation.
383  * The address must be usable for DMA.
384  *
385  * @req:	asynchronous compress request
386  * @src:	virtual address pointer to input buffer
387  * @slen:	size of the input buffer
388  */
389 static inline void acomp_request_set_src_dma(struct acomp_req *req,
390 					     const u8 *src, unsigned int slen)
391 {
392 	req->svirt = src;
393 	req->slen = slen;
394 
395 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
396 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
397 }
398 
399 /**
400  * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
401  *
402  * Sets source virtual address required by an acomp operation.
403  * The address can not be used for DMA.
404  *
405  * @req:	asynchronous compress request
406  * @src:	virtual address pointer to input buffer
407  * @slen:	size of the input buffer
408  */
409 static inline void acomp_request_set_src_nondma(struct acomp_req *req,
410 						const u8 *src,
411 						unsigned int slen)
412 {
413 	req->svirt = src;
414 	req->slen = slen;
415 
416 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
417 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
418 }
419 
420 /**
421  * acomp_request_set_dst_sg() -- Sets destination scatterlist
422  *
423  * Sets destination scatterlist required by an acomp operation.
424  *
425  * @req:	asynchronous compress request
426  * @dst:	pointer to output buffer scatterlist
427  * @dlen:	size of the output buffer
428  */
429 static inline void acomp_request_set_dst_sg(struct acomp_req *req,
430 					    struct scatterlist *dst,
431 					    unsigned int dlen)
432 {
433 	req->dst = dst;
434 	req->dlen = dlen;
435 
436 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
437 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
438 }
439 
440 /**
441  * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
442  *
443  * Sets destination virtual address required by an acomp operation.
444  * The address must be usable for DMA.
445  *
446  * @req:	asynchronous compress request
447  * @dst:	virtual address pointer to output buffer
448  * @dlen:	size of the output buffer
449  */
450 static inline void acomp_request_set_dst_dma(struct acomp_req *req,
451 					     u8 *dst, unsigned int dlen)
452 {
453 	req->dvirt = dst;
454 	req->dlen = dlen;
455 
456 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
457 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
458 }
459 
460 /**
461  * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
462  *
463  * Sets destination virtual address required by an acomp operation.
464  * The address can not be used for DMA.
465  *
466  * @req:	asynchronous compress request
467  * @dst:	virtual address pointer to output buffer
468  * @dlen:	size of the output buffer
469  */
470 static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
471 						u8 *dst, unsigned int dlen)
472 {
473 	req->dvirt = dst;
474 	req->dlen = dlen;
475 
476 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
477 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
478 }
479 
/* Link @req onto the request chain headed by @head. */
static inline void acomp_request_chain(struct acomp_req *req,
				       struct acomp_req *head)
{
	crypto_request_chain(&req->base, &head->base);
}
485 
/**
 * crypto_acomp_compress() -- Invoke asynchronous compress operation
 *
 * Function invokes the asynchronous compress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_compress(struct acomp_req *req);

/**
 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
 *
 * Function invokes the asynchronous decompress operation
 *
 * @req:	asynchronous decompress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_decompress(struct acomp_req *req);
507 
508 static inline struct acomp_req *acomp_request_on_stack_init(
509 	char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly)
510 {
511 	struct acomp_req *req;
512 
513 	if (!stackonly && (req = acomp_request_alloc(tfm, gfp)))
514 		return req;
515 
516 	req = (void *)buf;
517 	acomp_request_set_tfm(req, tfm->fb);
518 	req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
519 
520 	return req;
521 }
522 
523 #endif
524