xref: /linux/include/crypto/internal/acompress.h (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Asynchronous Compression operations
4  *
5  * Copyright (c) 2016, Intel Corporation
6  * Authors: Weigang Li <weigang.li@intel.com>
7  *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
8  */
9 #ifndef _CRYPTO_ACOMP_INT_H
10 #define _CRYPTO_ACOMP_INT_H
11 
12 #include <crypto/acompress.h>
13 #include <crypto/algapi.h>
14 #include <crypto/scatterwalk.h>
15 #include <linux/compiler_types.h>
16 #include <linux/cpumask_types.h>
17 #include <linux/spinlock.h>
18 #include <linux/workqueue_types.h>
19 
/*
 * Declare an on-stack acomp request named @name, sized for a synchronous
 * fallback transform, and initialise it from @req via
 * acomp_fbreq_on_stack_init().  The backing storage is a char array large
 * enough for struct acomp_req plus MAX_SYNC_COMP_REQSIZE of request context,
 * aligned for crypto use via CRYPTO_MINALIGN_ATTR.
 */
#define ACOMP_FBREQ_ON_STACK(name, req) \
	char __##name##_req[sizeof(struct acomp_req) + \
			    MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct acomp_req *name = acomp_fbreq_on_stack_init( \
		__##name##_req, (req))
25 
/**
 * struct acomp_alg - asynchronous compression algorithm
 *
 * @compress:	Function performs a compress operation
 * @decompress:	Function performs a de-compress operation
 * @init:	Initialize the cryptographic transformation object.
 *		This function is used to initialize the cryptographic
 *		transformation object. This function is called only once at
 *		the instantiation time, right after the transformation context
 *		was allocated. In case the cryptographic hardware has some
 *		special requirements which need to be handled by software, this
 *		function shall check for the precise requirement of the
 *		transformation and put any software fallbacks in place.
 * @exit:	Deinitialize the cryptographic transformation object. This is a
 *		counterpart to @init, used to remove various changes set in
 *		@init.
 *
 * @base:	Common crypto API algorithm data structure
 * @calg:	Common algorithm data structure shared with scomp
 */
struct acomp_alg {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	int (*init)(struct crypto_acomp *tfm);
	void (*exit)(struct crypto_acomp *tfm);

	union {
		struct COMP_ALG_COMMON;
		struct comp_alg_common calg;
	};
};
57 
/*
 * struct crypto_acomp_stream - one (de)compression stream
 * @lock: serialises use of this stream (taken _bh by
 *        crypto_acomp_lock_stream_bh())
 * @ctx:  per-stream algorithm context; presumably allocated by the
 *        alloc_ctx() callback of struct crypto_acomp_streams — confirm
 *        against the C file implementing crypto_acomp_alloc_streams()
 */
struct crypto_acomp_stream {
	spinlock_t lock;
	void *ctx;
};
62 
/*
 * struct crypto_acomp_streams - per-CPU pool of (de)compression streams
 * @alloc_ctx:   allocate a per-stream algorithm context
 * @free_ctx:    free a context returned by @alloc_ctx
 * @cfree_ctx:   const-qualified alias of @free_ctx (same storage via union)
 * @streams:     per-CPU array of streams
 * @stream_work: deferred work, presumably to populate streams that could
 *               not be allocated inline — see the acomp core for details
 * @stream_want: mask of CPUs whose stream still needs attention from
 *               @stream_work
 */
struct crypto_acomp_streams {
	/* These must come first because of struct scomp_alg. */
	void *(*alloc_ctx)(void);
	union {
		void (*free_ctx)(void *);
		void (*cfree_ctx)(const void *);
	};

	struct crypto_acomp_stream __percpu *streams;
	struct work_struct stream_work;
	cpumask_t stream_want;
};
75 
/*
 * struct acomp_walk - iterator state for walking acomp request buffers
 *
 * The source and destination are each either a caller-supplied virtual
 * address (the @src.virt/@dst.virt views) or a scatterlist walk (the
 * private @in/@out views) — the two views share storage via anonymous
 * unions, matching the CRYPTO_ACOMP_REQ_{SRC,DST}_VIRT request flags.
 * @slen/@dlen track the remaining source/destination byte counts.
 */
struct acomp_walk {
	union {
		/* Virtual address of the source. */
		struct {
			struct {
				const void *const addr;
			} virt;
		} src;

		/* Private field for the API, do not use. */
		struct scatter_walk in;
	};

	union {
		/* Virtual address of the destination. */
		struct {
			struct {
				void *const addr;
			} virt;
		} dst;

		/* Private field for the API, do not use. */
		struct scatter_walk out;
	};

	/* Remaining source bytes. */
	unsigned int slen;
	/* Remaining destination bytes. */
	unsigned int dlen;

	/* Walk flags, private to the API. */
	int flags;
};
106 
107 /*
108  * Transform internal helpers.
109  */
/* Return the request-private context area that trails struct acomp_req. */
static inline void *acomp_request_ctx(struct acomp_req *req)
{
	return req->__ctx;
}
114 
/* Return the transform-private context area of an acomp tfm. */
static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
{
	return tfm->base.__crt_ctx;
}
119 
/*
 * Invoke the request's completion callback with status @err.
 * Thin wrapper around crypto_request_complete() on the base request.
 */
static inline void acomp_request_complete(struct acomp_req *req,
					  int err)
{
	crypto_request_complete(&req->base, err);
}
125 
126 /**
127  * crypto_register_acomp() -- Register asynchronous compression algorithm
128  *
129  * Function registers an implementation of an asynchronous
130  * compression algorithm
131  *
132  * @alg:	algorithm definition
133  *
134  * Return:	zero on success; error code in case of error
135  */
136 int crypto_register_acomp(struct acomp_alg *alg);
137 
138 /**
139  * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
140  *
141  * Function unregisters an implementation of an asynchronous
142  * compression algorithm
143  *
144  * @alg:	algorithm definition
145  */
146 void crypto_unregister_acomp(struct acomp_alg *alg);
147 
148 int crypto_register_acomps(struct acomp_alg *algs, int count);
149 void crypto_unregister_acomps(struct acomp_alg *algs, int count);
150 
151 static inline bool acomp_request_issg(struct acomp_req *req)
152 {
153 	return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
154 				    CRYPTO_ACOMP_REQ_DST_VIRT));
155 }
156 
157 static inline bool acomp_request_src_isvirt(struct acomp_req *req)
158 {
159 	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
160 }
161 
162 static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
163 {
164 	return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
165 }
166 
167 static inline bool acomp_request_isvirt(struct acomp_req *req)
168 {
169 	return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
170 				  CRYPTO_ACOMP_REQ_DST_VIRT);
171 }
172 
173 static inline bool acomp_request_src_isnondma(struct acomp_req *req)
174 {
175 	return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
176 }
177 
178 static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
179 {
180 	return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
181 }
182 
183 static inline bool acomp_request_isnondma(struct acomp_req *req)
184 {
185 	return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
186 				  CRYPTO_ACOMP_REQ_DST_NONDMA);
187 }
188 
/*
 * True if this transform accepts virtual-address requests.
 * Pure wrapper around crypto_tfm_req_virt() on the base tfm.
 */
static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
{
	return crypto_tfm_req_virt(&tfm->base);
}
193 
194 void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
195 int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
196 
197 struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
198 	struct crypto_acomp_streams *s) __acquires(stream);
199 
/*
 * Release a stream obtained from crypto_acomp_lock_stream_bh(),
 * dropping its lock and re-enabling bottom halves.
 */
static inline void crypto_acomp_unlock_stream_bh(
	struct crypto_acomp_stream *stream) __releases(stream)
{
	spin_unlock_bh(&stream->lock);
}
205 
206 void acomp_walk_done_src(struct acomp_walk *walk, int used);
207 void acomp_walk_done_dst(struct acomp_walk *walk, int used);
208 int acomp_walk_next_src(struct acomp_walk *walk);
209 int acomp_walk_next_dst(struct acomp_walk *walk);
210 int acomp_walk_virt(struct acomp_walk *__restrict walk,
211 		    struct acomp_req *__restrict req, bool atomic);
212 
213 static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
214 {
215 	return walk->slen != cur;
216 }
217 
218 static inline u32 acomp_request_flags(struct acomp_req *req)
219 {
220 	return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
221 }
222 
/* Return the transform's fallback tfm (base tfm's ->fb) as an acomp tfm. */
static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
{
	return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
}
227 
/*
 * Initialise the stack buffer declared by ACOMP_FBREQ_ON_STACK as a
 * request for @old's fallback transform, mirroring @old's buffers.
 *
 * @buf: storage of at least sizeof(struct acomp_req) +
 *       MAX_SYNC_COMP_REQSIZE bytes, CRYPTO_MINALIGN_ATTR-aligned
 * @old: the request being redirected to the fallback
 *
 * Returns @buf cast to a ready-to-use struct acomp_req.
 */
static inline struct acomp_req *acomp_fbreq_on_stack_init(
	char *buf, struct acomp_req *old)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
	struct acomp_req *req = (void *)buf;

	/* Bind the on-stack request to the fallback tfm. */
	crypto_stack_request_init(&req->base,
				  crypto_acomp_tfm(crypto_acomp_fb(tfm)));
	/* No callback: stack requests must complete synchronously. */
	acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
	/* Carry over exactly the API-private flag bits from @old. */
	req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
	req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
	/* Mirror @old's source/destination buffers and lengths. */
	req->src = old->src;
	req->dst = old->dst;
	req->slen = old->slen;
	req->dlen = old->dlen;

	return req;
}
246 
247 #endif
248