/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H

#include <linux/atomic.h>
#include <linux/args.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>

/* Set this bit if source is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_SRC_VIRT	0x00000002

/* Set this bit if the virtual address source cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_SRC_NONDMA	0x00000004

/* Set this bit if destination is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_DST_VIRT	0x00000008

/* Set this bit if the virtual address destination cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_DST_NONDMA	0x00000010

/* Private flags that should not be touched by the user. */
#define CRYPTO_ACOMP_REQ_PRIVATE \
	(CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
	 CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)

#define CRYPTO_ACOMP_DST_MAX		131072

#define	MAX_SYNC_COMP_REQSIZE		0

#define ACOMP_REQUEST_ON_STACK(name, tfm) \
	char __##name##_req[sizeof(struct acomp_req) + \
			    MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct acomp_req *name = acomp_request_on_stack_init( \
		__##name##_req, (tfm))
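
/*
 * Example: a minimal sketch of a request declared on the stack.  It assumes
 * the tfm was allocated as a synchronous transform (e.g. with CRYPTO_ALG_ASYNC
 * set in the allocation mask); the function name and parameters below are
 * illustrative only, not part of this API.
 *
 *	static int example_compress_sync(struct crypto_acomp *tfm,
 *					 struct scatterlist *src, unsigned int slen,
 *					 struct scatterlist *dst, unsigned int dlen)
 *	{
 *		ACOMP_REQUEST_ON_STACK(req, tfm);
 *
 *		acomp_request_set_callback(req, 0, NULL, NULL);
 *		acomp_request_set_params(req, src, dst, slen, dlen);
 *		return crypto_acomp_compress(req);
 *	}
 */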

#define ACOMP_REQUEST_CLONE(name, gfp) \
	acomp_request_clone(name, sizeof(__##name##_req), gfp)

struct acomp_req;
struct folio;

struct acomp_req_chain {
	crypto_completion_t compl;
	void *data;
	struct scatterlist ssg;
	struct scatterlist dsg;
	union {
		const u8 *src;
		struct folio *sfolio;
	};
	union {
		u8 *dst;
		struct folio *dfolio;
	};
	u32 flags;
};

/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base:	Common attributes for asynchronous crypto requests
 * @src:	Source scatterlist
 * @dst:	Destination scatterlist
 * @svirt:	Source virtual address
 * @dvirt:	Destination virtual address
 * @slen:	Size of the input buffer
 * @dlen:	Size of the output buffer and number of bytes produced
 * @chain:	Private API code data, do not use
 * @__ctx:	Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	union {
		struct scatterlist *src;
		const u8 *svirt;
	};
	union {
		struct scatterlist *dst;
		u8 *dvirt;
	};
	unsigned int slen;
	unsigned int dlen;

	struct acomp_req_chain chain;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress:		Function that performs a compression operation
 * @decompress:		Function that performs a decompression operation
 * @reqsize:		Context size for (de)compression requests
 * @fb:			Synchronous fallback tfm
 * @base:		Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	unsigned int reqsize;
	struct crypto_acomp *fb;
	struct crypto_tfm base;
};

#define COMP_ALG_COMMON {			\
	struct crypto_alg base;			\
}
struct comp_alg_common COMP_ALG_COMMON;

/**
 * DOC: Asynchronous Compression API
 *
 * The Asynchronous Compression API is used with the algorithms of type
 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
 */

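/*
 * Example: an illustrative sketch of one compression pass driven
 * synchronously from process context.  The algorithm name "deflate" and the
 * scatterlists src_sg/dst_sg with lengths slen/dlen are assumptions made for
 * the example, not requirements of the API.
 *
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = acomp_request_alloc(tfm);
 *	if (!req) {
 *		err = -ENOMEM;
 *		goto out_free_tfm;
 *	}
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *	if (!err)
 *		pr_debug("compressed %u bytes into %u bytes\n", slen, req->dlen);
 *
 *	acomp_request_free(req);
 * out_free_tfm:
 *	crypto_free_acomp(tfm);
 */
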
/**
 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * Allocate a handle for a compression algorithm. The returned struct
 * crypto_acomp is the handle that is required for any subsequent
 * API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask);
/**
 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 * @node:	specifies the NUMA node the ZIP hardware belongs to
 *
 * Allocate a handle for a compression algorithm. Drivers should try to use
 * (de)compressors on the specified NUMA node.
 * The returned struct crypto_acomp is the handle that is required for any
 * subsequent API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					u32 mask, int node);
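
/*
 * Example: a sketch of allocating a transform close to a device's NUMA node.
 * The "deflate" name and the device pointer "dev" are illustrative
 * assumptions.
 *
 *	struct crypto_acomp *tfm;
 *
 *	tfm = crypto_alloc_acomp_node("deflate", 0, 0, dev_to_node(dev));
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */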

static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}

static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}

static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}

static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}

static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}

static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
}

static inline bool acomp_is_async(struct crypto_acomp *tfm)
{
	return crypto_comp_alg_common(tfm)->base.cra_flags &
	       CRYPTO_ALG_ASYNC;
}

static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}

/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}

static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
	mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;

	return crypto_has_alg(alg_name, type, mask);
}
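
/*
 * Example: a sketch of probing for algorithm availability before enabling an
 * optional feature; the "lz4" name is only an illustration.
 *
 *	if (!crypto_has_acomp("lz4", 0, 0))
 *		return -EOPNOTSUPP;
 */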

static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
}

static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
}

/**
 * acomp_request_alloc() -- allocates asynchronous (de)compression request
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @gfp:	gfp to pass to kzalloc (defaults to GFP_KERNEL)
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
static inline struct acomp_req *acomp_request_alloc_extra_noprof(
	struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
{
	struct acomp_req *req;
	size_t len;

	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
	if (check_add_overflow(len, extra, &len))
		return NULL;

	req = kzalloc_noprof(len, gfp);
	if (likely(req))
		acomp_request_set_tfm(req, tfm);
	return req;
}
#define acomp_request_alloc_noprof(tfm, ...) \
	CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
		tfm, ##__VA_ARGS__)
#define acomp_request_alloc_noprof_0(tfm) \
	acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
#define acomp_request_alloc_noprof_1(tfm, gfp) \
	acomp_request_alloc_extra_noprof(tfm, 0, gfp)
#define acomp_request_alloc(...)	alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))

/**
 * acomp_request_alloc_extra() -- allocate acomp request with extra memory
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @extra:	amount of extra memory
 * @gfp:	gfp to pass to kzalloc
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
#define acomp_request_alloc_extra(...)	alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))

static inline void *acomp_request_extra(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	size_t len;

	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
	return (void *)((char *)req + len);
}
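
/*
 * Example: a sketch of co-allocating caller-private state behind the request
 * and retrieving it later.  "struct my_job" is a hypothetical caller-side
 * structure, not part of this API.
 *
 *	struct my_job *job;
 *	struct acomp_req *req;
 *
 *	req = acomp_request_alloc_extra(tfm, sizeof(*job), GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	job = acomp_request_extra(req);
 */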

static inline bool acomp_req_on_stack(struct acomp_req *req)
{
	return crypto_req_on_stack(&req->base);
}

/**
 * acomp_request_free() -- zeroize and free asynchronous (de)compression
 *			   request as well as the output buffer if allocated
 *			   inside the algorithm
 *
 * @req:	request to free
 */
static inline void acomp_request_free(struct acomp_req *req)
{
	if (!req || acomp_req_on_stack(req))
		return;
	kfree_sensitive(req);
}

/**
 * acomp_request_set_callback() -- Sets an asynchronous callback
 *
 * Callback will be called when an asynchronous operation on a given
 * request is finished.
 *
 * @req:	request that the callback will be set for
 * @flgs:	flags specifying, for instance, whether the operation may backlog
 * @cmpl:	callback which will be called
 * @data:	private data used by the caller
 */
static inline void acomp_request_set_callback(struct acomp_req *req,
					      u32 flgs,
					      crypto_completion_t cmpl,
					      void *data)
{
	flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
	flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
	crypto_request_set_callback(&req->base, flgs, cmpl, data);
}
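
/*
 * Example: a sketch of a fully asynchronous submission.  "example_done" and
 * "struct example_ctx" are hypothetical names; the sketch assumes that the
 * @data pointer passed here is what the completion callback receives back,
 * and that a backlogged request signals -EINPROGRESS before its final
 * completion.
 *
 *	static void example_done(void *data, int err)
 *	{
 *		struct example_ctx *ctx = data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *		ctx->status = err;
 *		complete(&ctx->done);
 *	}
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   example_done, ctx);
 *	err = crypto_acomp_compress(req);
 *	if (err == -EINPROGRESS || err == -EBUSY)
 *		wait_for_completion(&ctx->done);
 */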

/**
 * acomp_request_set_params() -- Sets request parameters
 *
 * Sets parameters required by an acomp operation
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @dst:	pointer to output buffer scatterlist. If this is NULL, the
 *		acomp layer will allocate the output memory
 * @slen:	size of the input buffer
 * @dlen:	size of the output buffer. If dst is NULL, this can be used by
 *		the user to specify the maximum amount of memory to allocate
 */
static inline void acomp_request_set_params(struct acomp_req *req,
					    struct scatterlist *src,
					    struct scatterlist *dst,
					    unsigned int slen,
					    unsigned int dlen)
{
	req->src = src;
	req->dst = dst;
	req->slen = slen;
	req->dlen = dlen;

	req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
			     CRYPTO_ACOMP_REQ_SRC_NONDMA |
			     CRYPTO_ACOMP_REQ_DST_VIRT |
			     CRYPTO_ACOMP_REQ_DST_NONDMA);
}

/**
 * acomp_request_set_src_sg() -- Sets source scatterlist
 *
 * Sets source scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_sg(struct acomp_req *req,
					    struct scatterlist *src,
					    unsigned int slen)
{
	req->src = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_dma() -- Sets DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_dma(struct acomp_req *req,
					     const u8 *src, unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address cannot be used for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_nondma(struct acomp_req *req,
						const u8 *src,
						unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_folio() -- Sets source folio
 *
 * Sets source folio required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @folio:	pointer to input folio
 * @off:	input folio offset
 * @len:	size of the input buffer
 */
static inline void acomp_request_set_src_folio(struct acomp_req *req,
					       struct folio *folio, size_t off,
					       unsigned int len)
{
	sg_init_table(&req->chain.ssg, 1);
	sg_set_folio(&req->chain.ssg, folio, len, off);
	acomp_request_set_src_sg(req, &req->chain.ssg, len);
}
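
/*
 * Example: a sketch of compressing one page worth of data held in a folio
 * into a preallocated destination scatterlist; "folio", "dst_sg" and the
 * PAGE_SIZE lengths are assumptions of the example.
 *
 *	acomp_request_set_src_folio(req, folio, 0, PAGE_SIZE);
 *	acomp_request_set_dst_sg(req, dst_sg, PAGE_SIZE);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 */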

/**
 * acomp_request_set_dst_sg() -- Sets destination scatterlist
 *
 * Sets destination scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @dst:	pointer to output buffer scatterlist
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_sg(struct acomp_req *req,
					    struct scatterlist *dst,
					    unsigned int dlen)
{
	req->dst = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
}

/**
 * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_dma(struct acomp_req *req,
					     u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
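
/*
 * Example: a sketch of operating on linear kernel buffers, assuming both
 * "src_buf" and "dst_buf" point at memory that may be used for DMA (e.g.
 * kmalloc()ed buffers); otherwise the *_nondma variants apply.
 *
 *	acomp_request_set_src_dma(req, src_buf, slen);
 *	acomp_request_set_dst_dma(req, dst_buf, dlen);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 */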

/**
 * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address cannot be used for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
						u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}

/**
 * acomp_request_set_dst_folio() -- Sets destination folio
 *
 * Sets destination folio required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @folio:	pointer to output folio
 * @off:	output folio offset
 * @len:	size of the output buffer
 */
static inline void acomp_request_set_dst_folio(struct acomp_req *req,
					       struct folio *folio, size_t off,
					       unsigned int len)
{
	sg_init_table(&req->chain.dsg, 1);
	sg_set_folio(&req->chain.dsg, folio, len, off);
	acomp_request_set_dst_sg(req, &req->chain.dsg, len);
}

/**
 * crypto_acomp_compress() -- Invoke asynchronous compress operation
 *
 * Function invokes the asynchronous compress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_compress(struct acomp_req *req);

/**
 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
 *
 * Function invokes the asynchronous decompress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_decompress(struct acomp_req *req);
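
/*
 * Example: a sketch of the decompression direction; on success req->dlen is
 * updated to the number of bytes actually produced.  Buffer names and sizes
 * are illustrative.
 *
 *	acomp_request_set_params(req, comp_sg, out_sg, comp_len, out_max);
 *	err = crypto_wait_req(crypto_acomp_decompress(req), &wait);
 *	if (!err)
 *		out_len = req->dlen;
 */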

static inline struct acomp_req *acomp_request_on_stack_init(
	char *buf, struct crypto_acomp *tfm)
{
	struct acomp_req *req = (void *)buf;

	acomp_request_set_tfm(req, tfm);
	req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
	return req;
}

struct acomp_req *acomp_request_clone(struct acomp_req *req,
				      size_t total, gfp_t gfp);

#endif