/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Symmetric key ciphers.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
#define _CRYPTO_INTERNAL_SKCIPHER_H

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
#include <linux/list.h>
#include <linux/types.h>

/*
 * Set this if your algorithm is sync but needs a reqsize larger
 * than MAX_SYNC_SKCIPHER_REQSIZE.
 *
 * This reuses a flag bit that is otherwise specific to hash algorithms.
 */
#define CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE CRYPTO_ALG_OPTIONAL_KEY
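
/*
 * A minimal sketch of where the flag goes ("example_alg" is an
 * illustrative name, not part of this header):
 *
 *	static struct skcipher_alg example_alg = {
 *		.base.cra_flags	= CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
 *		...
 *	};
 *
 * crypto_alloc_sync_skcipher() masks this bit so that such an algorithm
 * is never bound to an on-stack SYNC_SKCIPHER_REQUEST_ON_STACK request,
 * which only reserves MAX_SYNC_SKCIPHER_REQSIZE bytes of context.
 */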

struct aead_request;
struct rtattr;

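/*
 * An skcipher_instance can be viewed either as a generic crypto_instance
 * (via s.base) or as a full skcipher_alg (via alg).  The head[] padding
 * in the anonymous union makes s.base coincide with alg.base, so both
 * views name the same embedded struct crypto_alg.
 */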
struct skcipher_instance {
	void (*free)(struct skcipher_instance *inst);
	union {
		struct {
			char head[offsetof(struct skcipher_alg, base)];
			struct crypto_instance base;
		} s;
		struct skcipher_alg alg;
	};
};

struct crypto_skcipher_spawn {
	struct crypto_spawn base;
};

struct skcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			void *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	struct list_head buffers;

	u8 *page;
	u8 *buffer;
	u8 *oiv;
	void *iv;

	unsigned int ivsize;

	int flags;
	unsigned int blocksize;
	unsigned int stride;
	unsigned int alignmask;
};

static inline struct crypto_instance *skcipher_crypto_instance(
	struct skcipher_instance *inst)
{
	return &inst->s.base;
}

static inline struct skcipher_instance *skcipher_alg_instance(
	struct crypto_skcipher *skcipher)
{
	return container_of(crypto_skcipher_alg(skcipher),
			    struct skcipher_instance, alg);
}

static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
	return crypto_instance_ctx(skcipher_crypto_instance(inst));
}

static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
	crypto_request_complete(&req->base, err);
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask);

static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}
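
/*
 * A minimal sketch of a template's ->create() callback taking a
 * reference on an underlying skcipher ("inst", "name", "mask" and the
 * error label stand in for the template's own variables):
 *
 *	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
 *	int err;
 *
 *	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
 *				   name, 0, mask);
 *	if (err)
 *		goto err_free_inst;
 *
 * The matching crypto_drop_skcipher() is normally reached through the
 * instance's ->free() callback.
 */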

static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return container_of(spawn->base.alg, struct skcipher_alg, base);
}

static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_skcipher_spawn_alg(spawn);
}

static inline struct crypto_skcipher *crypto_spawn_skcipher(
	struct crypto_skcipher_spawn *spawn)
{
	return crypto_spawn_tfm2(&spawn->base);
}
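
/*
 * A minimal sketch of instantiating the spawn from a template's
 * ->init() callback and caching the child transform in the tfm context
 * ("example_ctx" and "example_init_tfm" are illustrative names):
 *
 *	struct example_ctx {
 *		struct crypto_skcipher *child;
 *	};
 *
 *	static int example_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 *		struct crypto_skcipher_spawn *spawn =
 *			skcipher_instance_ctx(inst);
 *		struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
 *		struct crypto_skcipher *child = crypto_spawn_skcipher(spawn);
 *
 *		if (IS_ERR(child))
 *			return PTR_ERR(child);
 *		ctx->child = child;
 *		return 0;
 *	}
 */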

static inline void crypto_skcipher_set_reqsize(
	struct crypto_skcipher *skcipher, unsigned int reqsize)
{
	skcipher->reqsize = reqsize;
}

static inline void crypto_skcipher_set_reqsize_dma(
	struct crypto_skcipher *skcipher, unsigned int reqsize)
{
	reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
	skcipher->reqsize = reqsize;
}
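
/*
 * A template that stacks a request for its child on top of its own
 * request context typically sizes its requests in ->init(); a sketch
 * continuing the illustrative example above:
 *
 *	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 *					 crypto_skcipher_reqsize(ctx->child));
 */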

int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst);
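
/*
 * Drivers exposing several algorithms usually register them as an
 * array; a minimal sketch ("example_algs" is illustrative):
 *
 *	static struct skcipher_alg example_algs[2];
 *
 *	static int __init example_init(void)
 *	{
 *		return crypto_register_skciphers(example_algs,
 *						 ARRAY_SIZE(example_algs));
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		crypto_unregister_skciphers(example_algs,
 *					    ARRAY_SIZE(example_algs));
 *	}
 */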

int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req,
		       bool atomic);
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic);
void skcipher_walk_complete(struct skcipher_walk *walk, int err);

static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
	skcipher_walk_done(walk, -ECANCELED);
}
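
/*
 * The usual pattern for consuming a request with the walk API: map the
 * scatterlists, process each contiguous span, and hand the number of
 * unprocessed bytes back to skcipher_walk_done().  A minimal sketch
 * ("example_crypt_chunk" and EXAMPLE_BLOCK_SIZE are illustrative):
 *
 *	static int example_encrypt(struct skcipher_request *req)
 *	{
 *		struct skcipher_walk walk;
 *		unsigned int nbytes;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *		while ((nbytes = walk.nbytes) != 0) {
 *			example_crypt_chunk(walk.dst.virt.addr,
 *					    walk.src.virt.addr,
 *					    nbytes - nbytes % EXAMPLE_BLOCK_SIZE,
 *					    walk.iv);
 *			err = skcipher_walk_done(&walk,
 *						 nbytes % EXAMPLE_BLOCK_SIZE);
 *		}
 *		return err;
 *	}
 */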

static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx_dma(&tfm->base);
}

static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
	return req->__ctx;
}

static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
{
	unsigned int align = crypto_dma_align();

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(skcipher_request_ctx(req), align);
}
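
/*
 * The _dma variants return the context aligned for DMA and must be
 * paired with crypto_skcipher_set_reqsize_dma() above so that enough
 * slack is reserved for the realignment.  A minimal sketch
 * ("example_req_ctx" is illustrative; rctx comes back suitably aligned
 * for the device):
 *
 *	struct example_req_ctx {
 *		u8 hw_desc[64];
 *	};
 *
 *	static int example_dma_encrypt(struct skcipher_request *req)
 *	{
 *		struct example_req_ctx *rctx = skcipher_request_ctx_dma(req);
 *		...
 *	}
 */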

static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
	return req->base.flags;
}

static inline unsigned int crypto_skcipher_alg_min_keysize(
	struct skcipher_alg *alg)
{
	return alg->min_keysize;
}

static inline unsigned int crypto_skcipher_alg_max_keysize(
	struct skcipher_alg *alg)
{
	return alg->max_keysize;
}

static inline unsigned int crypto_skcipher_alg_walksize(
	struct skcipher_alg *alg)
{
	return alg->walksize;
}

/**
 * crypto_skcipher_walksize() - obtain walk size
 * @tfm: cipher handle
 *
 * In some cases, algorithms can only perform optimally when operating on
 * multiple blocks in parallel. This is reflected by the walksize, which
 * must be a multiple of the chunksize (or equal to it if the concern does
 * not apply).
 *
 * Return: walk size in bytes
 */
static inline unsigned int crypto_skcipher_walksize(
	struct crypto_skcipher *tfm)
{
	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
}

/* Helpers for simple block cipher modes of operation */
struct skcipher_ctx_simple {
	struct crypto_cipher *cipher;	/* underlying block cipher */
};

static inline struct crypto_cipher *
skcipher_cipher_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	return ctx->cipher;
}

struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb);

static inline struct crypto_alg *skcipher_ialg_simple(
	struct skcipher_instance *inst)
{
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);

	return crypto_spawn_cipher_alg(spawn);
}
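
/*
 * These helpers carry most of the boilerplate for templates that wrap a
 * single block cipher.  A minimal ->create() sketch ("example_create",
 * "example_encrypt" and "example_decrypt" are illustrative):
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */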

#endif	/* _CRYPTO_INTERNAL_SKCIPHER_H */