// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/completion.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && \
    !IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
					     u32 type, u32 mask);
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
					    u32 mask);

struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

void crypto_mod_put(struct crypto_alg *alg)
{
	struct module *module = alg->cra_module;

	crypto_alg_put(alg);
	module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);
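
/*
 * crypto_mod_get() takes two references at once -- one on the owning
 * module and one on the algorithm -- and crypto_mod_put() drops them in
 * reverse order, caching cra_module first because dropping the alg
 * reference may free the alg.  A minimal usage sketch (hypothetical
 * caller, not part of this file; do_something_with() is a placeholder):
 *
 *	if (crypto_mod_get(alg)) {
 *		do_something_with(alg);
 *		crypto_mod_put(alg);
 *	}
 */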

static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
					      u32 mask)
{
	struct crypto_alg *q, *alg = NULL;
	int best = -2;

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int exact, fuzzy;

		if (crypto_is_moribund(q))
			continue;

		if ((q->cra_flags ^ type) & mask)
			continue;

		exact = !strcmp(q->cra_driver_name, name);
		fuzzy = !strcmp(q->cra_name, name);
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		best = q->cra_priority;
		if (alg)
			crypto_mod_put(alg);
		alg = q;

		if (exact)
			break;
	}

	return alg;
}
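
/*
 * Matching rules in __crypto_alg_lookup(), for reference: an exact
 * cra_driver_name match ends the scan immediately, while a cra_name
 * match only wins if its cra_priority beats the best fuzzy match seen
 * so far.  With two hypothetical registrations (names and priorities
 * illustrative only):
 *
 *	cra_driver_name "sha256-generic", cra_name "sha256", priority 100
 *	cra_driver_name "sha256-ni",      cra_name "sha256", priority 300
 *
 * a lookup of "sha256" returns the priority-300 implementation, while a
 * lookup of "sha256-generic" returns that driver regardless of priority.
 */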

static void crypto_larval_destroy(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	BUG_ON(!crypto_is_larval(alg));
	if (!IS_ERR_OR_NULL(larval->adult))
		crypto_mod_put(larval->adult);
	kfree(larval);
}

struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
	struct crypto_larval *larval;

	larval = kzalloc(sizeof(*larval), GFP_KERNEL);
	if (!larval)
		return ERR_PTR(-ENOMEM);

	type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK);

	larval->mask = mask;
	larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
	larval->alg.cra_priority = -1;
	larval->alg.cra_destroy = crypto_larval_destroy;

	strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
	init_completion(&larval->completion);

	return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);
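
/*
 * A larval is a temporary placeholder kept on crypto_alg_list while the
 * real ("adult") algorithm is being loaded, instantiated by cryptomgr,
 * or tested.  Waiters sleep on ->completion until the larval matures
 * into ->adult or is killed.
 */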

static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
					    u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_larval *larval;

	larval = crypto_larval_alloc(name, type, mask);
	if (IS_ERR(larval))
		return ERR_CAST(larval);

	refcount_set(&larval->alg.cra_refcnt, 2);

	down_write(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	if (!alg) {
		alg = &larval->alg;
		list_add(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	if (alg != &larval->alg) {
		kfree(larval);
		if (crypto_is_larval(alg))
			alg = crypto_larval_wait(alg, type, mask);
	}

	return alg;
}

static void crypto_larval_kill(struct crypto_larval *larval)
{
	bool unlinked;

	down_write(&crypto_alg_sem);
	unlinked = list_empty(&larval->alg.cra_list);
	if (!unlinked)
		list_del_init(&larval->alg.cra_list);
	up_write(&crypto_alg_sem);

	if (unlinked)
		return;

	complete_all(&larval->completion);
	crypto_alg_put(&larval->alg);
}

void crypto_schedule_test(struct crypto_larval *larval)
{
	int err;

	err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
	WARN_ON_ONCE(err != NOTIFY_STOP);
}
EXPORT_SYMBOL_GPL(crypto_schedule_test);

static void crypto_start_test(struct crypto_larval *larval)
{
	if (!crypto_is_test_larval(larval))
		return;

	if (larval->test_started)
		return;

	down_write(&crypto_alg_sem);
	if (larval->test_started) {
		up_write(&crypto_alg_sem);
		return;
	}

	larval->test_started = true;
	up_write(&crypto_alg_sem);

	crypto_schedule_test(larval);
}

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
					     u32 type, u32 mask)
{
	struct crypto_larval *larval;
	long time_left;

again:
	larval = container_of(alg, struct crypto_larval, alg);

	if (!crypto_boot_test_finished())
		crypto_start_test(larval);

	time_left = wait_for_completion_killable_timeout(
		&larval->completion, 60 * HZ);

	alg = larval->adult;
	if (time_left < 0)
		alg = ERR_PTR(-EINTR);
	else if (!time_left) {
		if (crypto_is_test_larval(larval))
			crypto_larval_kill(larval);
		alg = ERR_PTR(-ETIMEDOUT);
	} else if (!alg) {
		alg = &larval->alg;
		alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
		      ERR_PTR(-EAGAIN);
	} else if (IS_ERR(alg))
		;
	else if (crypto_is_test_larval(larval) &&
		 !(alg->cra_flags & CRYPTO_ALG_TESTED))
		alg = ERR_PTR(-EAGAIN);
	else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)
		alg = ERR_PTR(-EAGAIN);
	else if (!crypto_mod_get(alg))
		alg = ERR_PTR(-EAGAIN);
	crypto_mod_put(&larval->alg);

	if (!IS_ERR(alg) && crypto_is_larval(alg))
		goto again;

	return alg;
}
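
/*
 * Return contract of crypto_larval_wait(), as implemented above: the
 * killable wait is bounded at 60 * HZ; -EINTR on a fatal signal,
 * -ETIMEDOUT if the larval never completes, -EAGAIN when the matured
 * algorithm is unusable (still untested, FIPS-internal, or its module
 * reference cannot be taken), otherwise a referenced, usable algorithm.
 * The "goto again" covers the case where the fresh lookup itself yields
 * another larval.
 */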

static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
					    u32 mask)
{
	const u32 fips = CRYPTO_ALG_FIPS_INTERNAL;
	struct crypto_alg *alg;
	u32 test = 0;

	if (!((type | mask) & CRYPTO_ALG_TESTED))
		test |= CRYPTO_ALG_TESTED;

	down_read(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, (type | test) & ~fips,
				  (mask | test) & ~fips);
	if (alg) {
		if (((type | mask) ^ fips) & fips)
			mask |= fips;
		mask &= fips;

		if (!crypto_is_larval(alg) &&
		    ((type ^ alg->cra_flags) & mask)) {
			/* Algorithm is disallowed in FIPS mode. */
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
	} else if (test) {
		alg = __crypto_alg_lookup(name, type, mask);
		if (alg && !crypto_is_larval(alg)) {
			/* Test failed */
			crypto_mod_put(alg);
			alg = ERR_PTR(-ELIBBAD);
		}
	}
	up_read(&crypto_alg_sem);

	return alg;
}
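
/*
 * Note the two-pass lookup above: the first pass implicitly demands
 * CRYPTO_ALG_TESTED (unless the caller referenced that bit explicitly)
 * and hides CRYPTO_ALG_FIPS_INTERNAL implementations; only if it finds
 * nothing does the second pass retry without the TESTED requirement, so
 * that an algorithm which exists but failed its self-tests surfaces as
 * -ELIBBAD instead of looking like it was never registered at all.
 */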

static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
					       u32 mask)
{
	struct crypto_alg *alg;

	if (!name)
		return ERR_PTR(-ENOENT);

	type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);

	alg = crypto_alg_lookup(name, type, mask);
	if (!alg && !(mask & CRYPTO_NOLOAD)) {
		request_module("crypto-%s", name);

		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
		      CRYPTO_ALG_NEED_FALLBACK))
			request_module("crypto-%s-all", name);

		alg = crypto_alg_lookup(name, type, mask);
	}

	if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
		alg = crypto_larval_wait(alg, type, mask);
	else if (alg)
		;
	else if (!(mask & CRYPTO_ALG_TESTED))
		alg = crypto_larval_add(name, type, mask);
	else
		alg = ERR_PTR(-ENOENT);

	return alg;
}

int crypto_probing_notify(unsigned long val, void *v)
{
	int ok;

	ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	if (ok == NOTIFY_DONE) {
		request_module("cryptomgr");
		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	}

	return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);
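
/*
 * If no registered notifier handles the request (NOTIFY_DONE), the
 * "cryptomgr" module is loaded on demand and the chain is walked once
 * more.  cryptomgr is the component that instantiates templates such as
 * "hmac(sha256)" from their parts and runs the self-tests.
 */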

struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	/*
	 * If the internal flag is set for a cipher, require a caller to
	 * invoke the cipher with the internal flag to use that cipher.
	 * Also, if a caller wants to allocate a cipher that may or may
	 * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
	 * !(mask & CRYPTO_ALG_INTERNAL).
	 */
	if (!((type | mask) & CRYPTO_ALG_INTERNAL))
		mask |= CRYPTO_ALG_INTERNAL;

	larval = crypto_larval_lookup(name, type, mask);
	if (IS_ERR(larval) || !crypto_is_larval(larval))
		return larval;

	ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);

	if (ok == NOTIFY_STOP)
		alg = crypto_larval_wait(larval, type, mask);
	else {
		crypto_mod_put(larval);
		alg = ERR_PTR(-ENOENT);
	}
	crypto_larval_kill(container_of(larval, struct crypto_larval, alg));
	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
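
/*
 * Sketch of the CRYPTO_ALG_INTERNAL convention enforced above
 * (hypothetical caller, illustrative only): a lookup that passes 0/0
 * never sees internal-only implementations, because the mask is widened
 * to include CRYPTO_ALG_INTERNAL before the search:
 *
 *	alg = crypto_alg_mod_lookup("aes", 0, 0);
 *	if (!IS_ERR(alg))
 *		crypto_mod_put(alg);
 */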

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
	const struct crypto_type *type = tfm->__crt_alg->cra_type;

	if (type && tfm->exit)
		tfm->exit(tfm);
}

static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = alg->cra_type;
	unsigned int len;

	len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	if (type_obj)
		return len + type_obj->ctxsize(alg, type, mask);

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		BUG();

	case CRYPTO_ALG_TYPE_CIPHER:
		len += crypto_cipher_ctxsize(alg);
		break;
	}

	return len;
}

void crypto_shoot_alg(struct crypto_alg *alg)
{
	down_write(&crypto_alg_sem);
	alg->cra_flags |= CRYPTO_ALG_DYING;
	up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
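
/*
 * "Shooting" an algorithm marks it CRYPTO_ALG_DYING under the write
 * lock.  The callers below use it when cra_init fails with -EAGAIN;
 * since crypto_is_moribund() makes __crypto_alg_lookup() skip DYING
 * entries, the retry loops in the allocation paths will not keep
 * selecting the same broken implementation.
 */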

struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
					 u32 mask, gfp_t gfp)
{
	struct crypto_tfm *tfm;
	unsigned int tfm_size;
	int err = -ENOMEM;

	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
	tfm = kzalloc(tfm_size, gfp);
	if (tfm == NULL)
		goto out_err;

	tfm->__crt_alg = alg;
	refcount_set(&tfm->refcnt, 1);

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(tfm);
out_err:
	tfm = ERR_PTR(err);
out:
	return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
				      u32 mask)
{
	return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/*
 *	crypto_alloc_base - Locate algorithm and allocate transform
 *	@alg_name: Name of algorithm
 *	@type: Type of algorithm
 *	@mask: Mask for type comparison
 *
 *	This function should not be used by new algorithm types.
 *	Please use crypto_alloc_tfm instead.
 *
 *	crypto_alloc_base() will first attempt to locate an already loaded
 *	algorithm.  If that fails and the kernel supports dynamically loadable
 *	modules, it will then attempt to load a module of the same name or
 *	alias.  If that fails it will send a query to any loaded crypto manager
 *	to construct an algorithm on the fly.  A refcount is grabbed on the
 *	algorithm which is then associated with the new transform.
 *
 *	The returned transform is of a non-determinate type.  Most people
 *	should use one of the more specific allocation functions such as
 *	crypto_alloc_skcipher().
 *
 *	In case of error the return value is an error pointer.
 */
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_alg_mod_lookup(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
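
/*
 * Minimal usage sketch for crypto_alloc_base() (hypothetical caller;
 * "cbc(aes)" is only an example name, and crypto_free_tfm() is assumed
 * to be the usual <linux/crypto.h> wrapper around crypto_destroy_tfm()):
 *
 *	struct crypto_tfm *tfm = crypto_alloc_base("cbc(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */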

static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
				 const struct crypto_type *frontend, int node,
				 gfp_t gfp)
{
	struct crypto_tfm *tfm;
	unsigned int tfmsize;
	unsigned int total;
	char *mem;

	tfmsize = frontend->tfmsize;
	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

	mem = kzalloc_node(total, gfp, node);
	if (mem == NULL)
		return ERR_PTR(-ENOMEM);

	tfm = (struct crypto_tfm *)(mem + tfmsize);
	tfm->__crt_alg = alg;
	tfm->node = node;
	refcount_set(&tfm->refcnt, 1);

	return mem;
}

void *crypto_create_tfm_node(struct crypto_alg *alg,
			     const struct crypto_type *frontend,
			     int node)
{
	struct crypto_tfm *tfm;
	char *mem;
	int err;

	mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
	if (IS_ERR(mem))
		goto out;

	tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
	tfm->fb = tfm;

	err = frontend->init_tfm(tfm);
	if (err)
		goto out_free_tfm;

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(mem);
	mem = ERR_PTR(err);
out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);

void *crypto_clone_tfm(const struct crypto_type *frontend,
		       struct crypto_tfm *otfm)
{
	struct crypto_alg *alg = otfm->__crt_alg;
	struct crypto_tfm *tfm;
	char *mem;

	mem = ERR_PTR(-ESTALE);
	if (unlikely(!crypto_mod_get(alg)))
		goto out;

	mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
	if (IS_ERR(mem)) {
		crypto_mod_put(alg);
		goto out;
	}

	tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
	tfm->crt_flags = otfm->crt_flags;
	tfm->fb = tfm;

out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_clone_tfm);

struct crypto_alg *crypto_find_alg(const char *alg_name,
				   const struct crypto_type *frontend,
				   u32 type, u32 mask)
{
	if (frontend) {
		type &= frontend->maskclear;
		mask &= frontend->maskclear;
		type |= frontend->type;
		mask |= frontend->maskset;
	}

	return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);

/*
 *	crypto_alloc_tfm_node - Locate algorithm and allocate transform
 *	@alg_name: Name of algorithm
 *	@frontend: Frontend algorithm type
 *	@type: Type of algorithm
 *	@mask: Mask for type comparison
 *	@node: NUMA node in which users desire to put requests, if node is
 *		NUMA_NO_NODE, it means users have no special requirement.
 *
 *	crypto_alloc_tfm() will first attempt to locate an already loaded
 *	algorithm.  If that fails and the kernel supports dynamically loadable
 *	modules, it will then attempt to load a module of the same name or
 *	alias.  If that fails it will send a query to any loaded crypto manager
 *	to construct an algorithm on the fly.  A refcount is grabbed on the
 *	algorithm which is then associated with the new transform.
 *
 *	The returned transform is of a non-determinate type.  Most people
 *	should use one of the more specific allocation functions such as
 *	crypto_alloc_skcipher().
 *
 *	In case of error the return value is an error pointer.
 */

void *crypto_alloc_tfm_node(const char *alg_name,
		       const struct crypto_type *frontend, u32 type, u32 mask,
		       int node)
{
	void *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_find_alg(alg_name, frontend, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = crypto_create_tfm_node(alg, frontend, node);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);

/*
 *	crypto_destroy_tfm - Free crypto transform
 *	@mem: Start of tfm slab
 *	@tfm: Transform to free
 *
 *	This function frees up the transform and any associated resources,
 *	then drops the refcount on the associated algorithm.
 */
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;

	if (IS_ERR_OR_NULL(mem))
		return;

	if (!refcount_dec_and_test(&tfm->refcnt))
		return;
	alg = tfm->__crt_alg;

	if (!tfm->exit && alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
	kfree_sensitive(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);

int crypto_has_alg(const char *name, u32 type, u32 mask)
{
	int ret = 0;
	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

	if (!IS_ERR(alg)) {
		crypto_mod_put(alg);
		ret = 1;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
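
/*
 * Usage sketch for crypto_has_alg() (hypothetical caller): probe for an
 * algorithm without keeping a reference, e.g. to decide on a fallback
 * at init time:
 *
 *	if (!crypto_has_alg("gcm(aes)", 0, 0))
 *		pr_info("gcm(aes) unavailable, using a fallback\n");
 */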

void crypto_req_done(void *data, int err)
{
	struct crypto_wait *wait = data;

	if (err == -EINPROGRESS)
		return;

	wait->err = err;
	complete(&wait->completion);
}
EXPORT_SYMBOL_GPL(crypto_req_done);
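
/*
 * crypto_req_done() is the stock completion callback consumed by the
 * crypto_wait_req() helper in <linux/crypto.h>.  A typical pattern
 * (sketch only; request setup is elided and ahash is merely an example
 * frontend):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */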

void crypto_destroy_alg(struct crypto_alg *alg)
{
	if (alg->cra_type && alg->cra_type->destroy)
		alg->cra_type->destroy(alg);
	if (alg->cra_destroy)
		alg->cra_destroy(alg);
}
EXPORT_SYMBOL_GPL(crypto_destroy_alg);

struct crypto_async_request *crypto_request_clone(
	struct crypto_async_request *req, size_t total, gfp_t gfp)
{
	struct crypto_tfm *tfm = req->tfm;
	struct crypto_async_request *nreq;

	nreq = kmemdup(req, total, gfp);
	if (!nreq) {
		req->tfm = tfm->fb;
		return req;
	}

	nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
	return nreq;
}
EXPORT_SYMBOL_GPL(crypto_request_clone);

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");