/* xref: /linux/include/crypto/internal/acompress.h (revision 47b5b6f9eb736b1868b0f9c1a1575b5922451cc6) */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Asynchronous Compression operations
4  *
5  * Copyright (c) 2016, Intel Corporation
6  * Authors: Weigang Li <weigang.li@intel.com>
7  *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
8  */
9 #ifndef _CRYPTO_ACOMP_INT_H
10 #define _CRYPTO_ACOMP_INT_H
11 
12 #include <crypto/acompress.h>
13 #include <crypto/algapi.h>
14 #include <crypto/scatterwalk.h>
15 #include <linux/compiler_types.h>
16 #include <linux/cpumask_types.h>
17 #include <linux/spinlock.h>
18 #include <linux/workqueue_types.h>
19 
/*
 * ACOMP_FBREQ_ON_STACK() - Declare and initialise an on-stack fallback request.
 *
 * Declares a stack buffer big enough for a struct acomp_req plus the
 * largest synchronous-compression request context, then initialises a
 * request pointer @name into that buffer from the original request @req
 * via acomp_fbreq_on_stack_init().
 */
#define ACOMP_FBREQ_ON_STACK(name, req) \
        char __##name##_req[sizeof(struct acomp_req) + \
                            MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
        struct acomp_req *name = acomp_fbreq_on_stack_init( \
                __##name##_req, (req))
25 
/**
 * struct acomp_alg - asynchronous compression algorithm
 *
 * @compress:	Function performs a compress operation
 * @decompress:	Function performs a de-compress operation
 * @init:	Initialize the cryptographic transformation object.
 *		This function is used to initialize the cryptographic
 *		transformation object. This function is called only once at
 *		the instantiation time, right after the transformation context
 *		was allocated. In case the cryptographic hardware has some
 *		special requirements which need to be handled by software, this
 *		function shall check for the precise requirement of the
 *		transformation and put any software fallbacks in place.
 * @exit:	Deinitialize the cryptographic transformation object. This is a
 *		counterpart to @init, used to remove various changes set in
 *		@init.
 *
 * @reqsize:	Context size for (de)compression requests
 * @base:	Common crypto API algorithm data structure
 * @calg:	Common algorithm data structure shared with scomp
 */
struct acomp_alg {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	int (*init)(struct crypto_acomp *tfm);
	void (*exit)(struct crypto_acomp *tfm);

	unsigned int reqsize;

	/* Anonymous union: @calg overlays the common COMP_ALG_COMMON fields. */
	union {
		struct COMP_ALG_COMMON;
		struct comp_alg_common calg;
	};
};
60 
/*
 * struct crypto_acomp_stream - One (de)compression stream slot.
 * @lock: Taken (BH-disabled) while the stream is in use; see
 *        crypto_acomp_lock_stream_bh()/crypto_acomp_unlock_stream_bh().
 * @ctx:  Algorithm-private stream context, allocated via
 *        crypto_acomp_streams::alloc_ctx.
 */
struct crypto_acomp_stream {
	spinlock_t lock;
	void *ctx;
};
65 
/*
 * struct crypto_acomp_streams - Per-CPU set of (de)compression streams.
 * @alloc_ctx:   Allocate an algorithm-private stream context.
 * @free_ctx:    Free a context returned by @alloc_ctx.
 * @cfree_ctx:   Const-qualified alias of @free_ctx (same storage).
 * @streams:     Per-CPU array of stream slots.
 * @stream_work: Work item associated with stream management.
 *               NOTE(review): presumably allocates contexts for the CPUs in
 *               @stream_want — confirm against acompress.c.
 * @stream_want: CPU mask tracked alongside @stream_work.
 */
struct crypto_acomp_streams {
	/* These must come first because of struct scomp_alg. */
	void *(*alloc_ctx)(void);
	union {
		void (*free_ctx)(void *);
		void (*cfree_ctx)(const void *);
	};

	struct crypto_acomp_stream __percpu *streams;
	struct work_struct stream_work;
	cpumask_t stream_want;
};
78 
/*
 * struct acomp_walk - Walk state over an acomp request's data buffers.
 *
 * The source and destination may each be supplied either as a virtual
 * address or as a scatterlist; the unions below overlay the two forms.
 */
struct acomp_walk {
	union {
		/* Virtual address of the source. */
		struct {
			struct {
				const void *const addr;
			} virt;
		} src;

		/* Private field for the API, do not use. */
		struct scatter_walk in;
	};

	union {
		/* Virtual address of the destination. */
		struct {
			struct {
				void *const addr;
			} virt;
		} dst;

		/* Private field for the API, do not use. */
		struct scatter_walk out;
	};

	/* Source/destination byte counts maintained by the walk helpers
	 * (acomp_walk_next_*()/acomp_walk_done_*()). */
	unsigned int slen;
	unsigned int dlen;

	/* Internal walk flags; private to the API. */
	int flags;
};
109 
110 /*
111  * Transform internal helpers.
112  */
113 static inline void *acomp_request_ctx(struct acomp_req *req)
114 {
115 	return req->__ctx;
116 }
117 
118 static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
119 {
120 	return tfm->base.__crt_ctx;
121 }
122 
123 static inline void acomp_request_complete(struct acomp_req *req,
124 					  int err)
125 {
126 	crypto_request_complete(&req->base, err);
127 }
128 
/**
 * crypto_register_acomp() -- Register asynchronous compression algorithm
 *
 * Function registers an implementation of an asynchronous
 * compression algorithm
 *
 * @alg:	algorithm definition
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_register_acomp(struct acomp_alg *alg);

/**
 * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
 *
 * Function unregisters an implementation of an asynchronous
 * compression algorithm
 *
 * @alg:	algorithm definition
 */
void crypto_unregister_acomp(struct acomp_alg *alg);

/* Register/unregister an array of @count algorithms in a single call. */
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
153 
154 static inline bool acomp_request_issg(struct acomp_req *req)
155 {
156 	return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
157 				    CRYPTO_ACOMP_REQ_DST_VIRT));
158 }
159 
160 static inline bool acomp_request_src_isvirt(struct acomp_req *req)
161 {
162 	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
163 }
164 
165 static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
166 {
167 	return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
168 }
169 
170 static inline bool acomp_request_isvirt(struct acomp_req *req)
171 {
172 	return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
173 				  CRYPTO_ACOMP_REQ_DST_VIRT);
174 }
175 
176 static inline bool acomp_request_src_isnondma(struct acomp_req *req)
177 {
178 	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
179 }
180 
181 static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
182 {
183 	return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
184 }
185 
186 static inline bool acomp_request_isnondma(struct acomp_req *req)
187 {
188 	return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
189 				  CRYPTO_ACOMP_REQ_DST_NONDMA);
190 }
191 
192 static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
193 {
194 	return crypto_tfm_req_chain(&tfm->base);
195 }
196 
/* Free/allocate the per-CPU stream array managed by @s. */
void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);

/*
 * Lock and return a stream from @s; pairs with
 * crypto_acomp_unlock_stream_bh().
 */
struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
	struct crypto_acomp_streams *s) __acquires(stream);

/* Drop the stream lock taken by crypto_acomp_lock_stream_bh(). */
static inline void crypto_acomp_unlock_stream_bh(
	struct crypto_acomp_stream *stream) __releases(stream)
{
	spin_unlock_bh(&stream->lock);
}

/* Walk helpers: advance/query the source and destination of a request walk. */
void acomp_walk_done_src(struct acomp_walk *walk, int used);
void acomp_walk_done_dst(struct acomp_walk *walk, int used);
int acomp_walk_next_src(struct acomp_walk *walk);
int acomp_walk_next_dst(struct acomp_walk *walk);
int acomp_walk_virt(struct acomp_walk *__restrict walk,
		    struct acomp_req *__restrict req);
215 
216 static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
217 {
218 	return walk->slen != cur;
219 }
220 
221 static inline u32 acomp_request_flags(struct acomp_req *req)
222 {
223 	return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
224 }
225 
/*
 * Initialise an on-stack fallback request in @buf from @old.
 *
 * Used by ACOMP_FBREQ_ON_STACK(); @buf must hold at least
 * sizeof(struct acomp_req) + MAX_SYNC_COMP_REQSIZE bytes.
 */
static inline struct acomp_req *acomp_fbreq_on_stack_init(
	char *buf, struct acomp_req *old)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
	struct acomp_req *req = (void *)buf;

	/* Route the new request to the transform's synchronous fallback. */
	acomp_request_set_tfm(req, tfm->fb);
	req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
	acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
	/* Carry over only the private flag bits from the original request. */
	req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
	req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
	/* Mirror the original request's buffers and lengths. */
	req->src = old->src;
	req->dst = old->dst;
	req->slen = old->slen;
	req->dlen = old->dlen;

	return req;
}
244 
245 #endif
246