/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/types.h>

#include <asm/unaligned.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		127
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15

#ifdef ARCH_DMA_MINALIGN
#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
#else
#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
#endif

#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
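
/*
 * Worked example (values are assumptions, not taken from this header): with
 * ARCH_DMA_MINALIGN == 128 and CRYPTO_MINALIGN == 8, as on a typical arm64
 * configuration, CRYPTO_DMA_PADDING is (128 - 1) & ~(8 - 1) == 120: the
 * extra bytes needed so that a CRYPTO_MINALIGN-aligned context can always be
 * rounded up to a DMA-safe boundary.
 */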

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
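
/*
 * Usage sketch (hypothetical module): a module implementing "sha256" would
 * declare
 *
 *	MODULE_ALIAS_CRYPTO("sha256");
 *
 * giving it both the plain "sha256" alias for userspace-initiated loading and
 * the "crypto-sha256" alias, so in-kernel autoloading can safely use
 * request_module("crypto-sha256").
 */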

struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);
#ifdef CONFIG_CRYPTO_STATS
	int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
#endif

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};
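
/*
 * Definition sketch (hypothetical template, not from this header): a template
 * fills in ->create() to build instances from attributes, e.g.:
 *
 *	static int mytmpl_create(struct crypto_template *tmpl,
 *				 struct rtattr **tb);
 *
 *	static struct crypto_template mytmpl_tmpl = {
 *		.name	= "mytmpl",
 *		.create	= mytmpl_create,
 *		.module	= THIS_MODULE,
 *	};
 *
 * registered from module init with crypto_register_template(&mytmpl_tmpl),
 * mirroring real templates such as "hmac" or "cbc".
 */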

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
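
/*
 * Usage sketch (hypothetical driver, names are assumptions): hardware drivers
 * usually feed requests through a crypto_queue under their own lock:
 *
 *	static int mydrv_enqueue(struct mydrv *drv,
 *				 struct crypto_async_request *req)
 *	{
 *		int ret;
 *
 *		spin_lock_bh(&drv->lock);
 *		ret = crypto_enqueue_request(&drv->queue, req);
 *		spin_unlock_bh(&drv->lock);
 *		return ret;
 *	}
 *
 * crypto_enqueue_request() returns -EINPROGRESS on success, -EBUSY when the
 * request was backlogged, or -ENOSPC when the queue is full and the request
 * did not set CRYPTO_TFM_REQ_MAY_BACKLOG.  The completion side later pulls
 * work with crypto_dequeue_request().
 */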

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

/*
 * XOR @src into @dst in place.  For compile-time-constant sizes that are a
 * multiple of sizeof(unsigned long), architectures with efficient unaligned
 * access get an open-coded word-at-a-time loop; everything else falls back
 * to __crypto_xor().
 */
static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;
		unsigned long l;

		while (size > 0) {
			l = get_unaligned(d) ^ get_unaligned(s++);
			put_unaligned(l, d++);
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}
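
/*
 * Example (illustrative): a CBC-style encrypt step XORs the previous
 * ciphertext block into the next plaintext block before encryption.  With a
 * constant size such as the AES block size, the fast path above is taken on
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS architectures:
 *
 *	crypto_xor(block, prev_ciphertext, 16);
 *
 * crypto_xor_cpy() below is the out-of-place variant: it writes src1 ^ src2
 * to a separate destination rather than updating @dst in place.
 */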

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;
		unsigned long l;

		while (size > 0) {
			l = get_unaligned(s1++) ^ get_unaligned(s2++);
			put_unaligned(l, d++);
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
					 unsigned int align)
{
	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline unsigned int crypto_dma_align(void)
{
	return CRYPTO_DMA_ALIGN;
}

static inline unsigned int crypto_dma_padding(void)
{
	return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}
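
/*
 * Usage sketch (hypothetical driver, names are assumptions): a driver whose
 * transform context is used for DMA reserves crypto_dma_padding() extra bytes
 * when reporting its context size and then always accesses the context
 * through the DMA-aligned helper:
 *
 *	struct mydrv_ctx {
 *		u8 key[32];
 *	};
 *
 *	static inline struct mydrv_ctx *mydrv_ctx(struct crypto_tfm *tfm)
 *	{
 *		return crypto_tfm_ctx_dma(tfm);
 *	}
 *
 * with the algorithm declaring
 * .cra_ctxsize = sizeof(struct mydrv_ctx) + CRYPTO_DMA_PADDING.
 */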

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
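
/*
 * Usage sketch (abridged template ->create(), hypothetical spawn/inst names):
 * pass the inherited mask to crypto_grab_*() so a user request to clear any
 * of CRYPTO_ALG_INHERITED_FLAGS reaches the inner algorithm:
 *
 *	struct crypto_attr_type *algt = crypto_get_attr_type(tb);
 *	u32 mask;
 *
 *	if (IS_ERR(algt))
 *		return PTR_ERR(algt);
 *	mask = crypto_algt_inherited_mask(algt);
 *	err = crypto_grab_spawn(spawn, inst, crypto_attr_alg_name(tb[1]),
 *				0, mask);
 *
 * Most templates get the same mask through crypto_check_attr_type()'s
 * @mask_ret argument while also validating the requested type.
 */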

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
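
/*
 * Example (illustrative, placeholder buffer names): authentication tag checks
 * must not reveal the position of the first mismatching byte, so AEAD code
 * compares tags with crypto_memneq() instead of memcmp():
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 */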

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

static inline void crypto_request_complete(struct crypto_async_request *req,
					   int err)
{
	req->complete(req->data, err);
}
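
/*
 * Usage sketch (hypothetical driver completion path): when the hardware
 * finishes, the driver reports the result through this helper, which invokes
 * the caller's callback with its opaque @data cookie:
 *
 *	crypto_request_complete(drv->cur_req, err);
 *
 * A request taken off the backlog is conventionally completed twice: first
 * with -EINPROGRESS when processing actually starts, then with the final
 * status.
 */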

#endif	/* _CRYPTO_ALGAPI_H */