/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15
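
/*
 * For example, code that must bounce a misaligned cipher block through a
 * temporary buffer can size that buffer on the stack for the worst case.
 * A sketch of the pattern (illustrative, not a definition from this
 * header):
 *
 *	u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 *	u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 */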

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};
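
/*
 * A template is typically defined statically and registered from module
 * init. A minimal sketch, assuming a hypothetical "example" template and
 * create callback (names not from this header):
 *
 *	static struct crypto_template crypto_example_tmpl = {
 *		.name = "example",
 *		.create = example_create,
 *		.module = THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return crypto_register_template(&crypto_example_tmpl);
 *	}
 */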

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
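
/*
 * A template's ->create() usually starts by validating the attributes and
 * fetching the inner algorithm's name. A minimal sketch, assuming a
 * hypothetical skcipher template (names not from this header):
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		const char *cipher_name;
 *		u32 mask;
 *		int err;
 *
 *		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER,
 *					     &mask);
 *		if (err)
 *			return err;
 *
 *		cipher_name = crypto_attr_alg_name(tb[1]);
 *		if (IS_ERR(cipher_name))
 *			return PTR_ERR(cipher_name);
 *		...
 *	}
 */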

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
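
/*
 * Drivers typically guard a crypto_queue with their own lock. A minimal
 * sketch of the enqueue side, assuming a hypothetical driver context with
 * a spinlock (names not from this header):
 *
 *	crypto_init_queue(&dev->queue, 50);
 *	...
 *	spin_lock_bh(&dev->lock);
 *	err = crypto_enqueue_request(&dev->queue, req);
 *	spin_unlock_bh(&dev->lock);
 */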

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}
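
/*
 * crypto_xor() XORs @src into @dst in place (dst ^= src). Typical use is
 * folding an IV into a plaintext block in a CBC-style mode (illustrative
 * only):
 *
 *	crypto_xor(block, iv, bs);
 */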

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
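
/*
 * crypto_xor_cpy() is the out-of-place variant: it writes src1 ^ src2 to
 * dst, leaving both sources intact (illustrative only):
 *
 *	crypto_xor_cpy(dst, src, iv, bs);
 */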

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}
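
/*
 * Useful when an algorithm sets a nonzero cra_alignmask and keeps data
 * needing that alignment in its context. A sketch, with a hypothetical
 * context type (not from this header):
 *
 *	struct example_ctx *ctx = crypto_tfm_ctx_aligned(tfm);
 */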

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}
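
/*
 * The instance context typically holds the spawn(s) grabbed by the
 * template's ->create(). A minimal sketch (layout hypothetical):
 *
 *	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
 */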

struct crypto_cipher_spawn {
	struct crypto_spawn base;
};

static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
				     struct crypto_instance *inst,
				     const char *name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}

static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}
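
/*
 * A successful grab must be balanced by a drop on every failure path and
 * when the instance is freed. A sketch of the create-side pairing, with
 * hypothetical names and abbreviated error handling:
 *
 *	err = crypto_grab_cipher(spawn, inst, cipher_name, 0, mask);
 *	if (err)
 *		goto err_free_inst;	(a path that drops the spawn)
 */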

static inline struct crypto_alg *crypto_spawn_cipher_alg(
	struct crypto_cipher_spawn *spawn)
{
	return spawn->base.alg;
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_cipher_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
}
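
/*
 * crypto_spawn_cipher() is called from the instance's tfm init hook to
 * instantiate the inner cipher. A minimal sketch (ctx hypothetical):
 *
 *	cipher = crypto_spawn_cipher(spawn);
 *	if (IS_ERR(cipher))
 *		return PTR_ERR(cipher);
 *	ctx->child = cipher;
 */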

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}
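
/*
 * Drivers pairing this with crypto_dequeue_request() notify the owner of
 * a backlogged request before processing the next one. A common sketch
 * (locking omitted, names hypothetical):
 *
 *	backlog = crypto_get_backlog(&dev->queue);
 *	req = crypto_dequeue_request(&dev->queue);
 *	if (backlog)
 *		backlog->complete(backlog, -EINPROGRESS);
 */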

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
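
/*
 * In a template's ->create(), the resulting mask is passed to the
 * crypto_grab_*() helpers, and the inner algorithm's inherited flags are
 * then propagated to the instance. A sketch (names hypothetical):
 *
 *	algt = crypto_get_attr_type(tb);
 *	if (IS_ERR(algt))
 *		return PTR_ERR(algt);
 *	mask = crypto_algt_inherited_mask(algt);
 *
 *	err = crypto_grab_cipher(spawn, inst, cipher_name, 0, mask);
 *	...
 *	inst->alg.cra_flags = alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS;
 */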

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
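
/*
 * Use crypto_memneq() instead of memcmp() whenever the comparison result
 * must not leak which bytes differed, e.g. when verifying an
 * authentication tag (illustrative only):
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 */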

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};
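
/*
 * Notifier callbacks receive one of the events above. A minimal sketch,
 * with hypothetical names (not part of this header):
 *
 *	static int example_notify(struct notifier_block *nb,
 *				  unsigned long msg, void *data)
 *	{
 *		if (msg == CRYPTO_MSG_ALG_LOADED)
 *			...
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_notify,
 *	};
 *
 *	err = crypto_register_notifier(&example_nb);
 */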

#endif	/* _CRYPTO_ALGAPI_H */