// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/completion.h>
#include "internal.h"

LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);

BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);

#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
					     u32 type, u32 mask);
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
					    u32 mask);

struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);

void crypto_mod_put(struct crypto_alg *alg)
{
	struct module *module = alg->cra_module;

	crypto_alg_put(alg);
	module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);

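/*
 * Find the best registered algorithm matching @name under the
 * @type/@mask flag constraint.  An exact cra_driver_name match wins
 * immediately; otherwise the highest-priority cra_name match is
 * returned.  Called with crypto_alg_sem held; the returned algorithm
 * carries a reference taken via crypto_mod_get().
 */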
static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
					      u32 mask)
{
	struct crypto_alg *q, *alg = NULL;
	int best = -2;

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int exact, fuzzy;

		if (crypto_is_moribund(q))
			continue;

		if ((q->cra_flags ^ type) & mask)
			continue;

		exact = !strcmp(q->cra_driver_name, name);
		fuzzy = !strcmp(q->cra_name, name);
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		best = q->cra_priority;
		if (alg)
			crypto_mod_put(alg);
		alg = q;

		if (exact)
			break;
	}

	return alg;
}

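/*
 * Larvals are temporary placeholder algorithms: one is linked into
 * crypto_alg_list while the real ("adult") algorithm is still being
 * instantiated by module loading or template construction, or is
 * waiting for its self-tests to finish.  Concurrent lookups block on
 * the larval's completion instead of failing outright.
 */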
static void crypto_larval_destroy(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

	BUG_ON(!crypto_is_larval(alg));
	if (!IS_ERR_OR_NULL(larval->adult))
		crypto_mod_put(larval->adult);
	kfree(larval);
}

struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
	struct crypto_larval *larval;

	larval = kzalloc(sizeof(*larval), GFP_KERNEL);
	if (!larval)
		return ERR_PTR(-ENOMEM);

	type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK);

	larval->mask = mask;
	larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
	larval->alg.cra_priority = -1;
	larval->alg.cra_destroy = crypto_larval_destroy;

	strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
	init_completion(&larval->completion);

	return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);

static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
					    u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_larval *larval;

	larval = crypto_larval_alloc(name, type, mask);
	if (IS_ERR(larval))
		return ERR_CAST(larval);

	refcount_set(&larval->alg.cra_refcnt, 2);

	down_write(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, type, mask);
	if (!alg) {
		alg = &larval->alg;
		list_add(&alg->cra_list, &crypto_alg_list);
	}
	up_write(&crypto_alg_sem);

	if (alg != &larval->alg) {
		kfree(larval);
		if (crypto_is_larval(alg))
			alg = crypto_larval_wait(alg, type, mask);
	}

	return alg;
}

static void crypto_larval_kill(struct crypto_larval *larval)
{
	bool unlinked;

	down_write(&crypto_alg_sem);
	unlinked = list_empty(&larval->alg.cra_list);
	if (!unlinked)
		list_del_init(&larval->alg.cra_list);
	up_write(&crypto_alg_sem);

	if (unlinked)
		return;

	complete_all(&larval->completion);
	crypto_alg_put(&larval->alg);
}

void crypto_schedule_test(struct crypto_larval *larval)
{
	int err;

	err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
	WARN_ON_ONCE(err != NOTIFY_STOP);
}
EXPORT_SYMBOL_GPL(crypto_schedule_test);

static void crypto_start_test(struct crypto_larval *larval)
{
	if (!crypto_is_test_larval(larval))
		return;

	if (larval->test_started)
		return;

	down_write(&crypto_alg_sem);
	if (larval->test_started) {
		up_write(&crypto_alg_sem);
		return;
	}

	larval->test_started = true;
	up_write(&crypto_alg_sem);

	crypto_schedule_test(larval);
}

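/*
 * Wait (killable, up to 60 seconds) for a larval to mature into a
 * full algorithm, starting the boot-time self-test first if it has
 * not been kicked off yet.  Returns a reference to the adult
 * algorithm, or an ERR_PTR if the wait was interrupted, timed out, or
 * the matured algorithm is unusable (untested, FIPS-internal, or its
 * module is going away).
 */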
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
					     u32 type, u32 mask)
{
	struct crypto_larval *larval;
	long time_left;

again:
	larval = container_of(alg, struct crypto_larval, alg);

	if (!crypto_boot_test_finished())
		crypto_start_test(larval);

	time_left = wait_for_completion_killable_timeout(
		&larval->completion, 60 * HZ);

	alg = larval->adult;
	if (time_left < 0)
		alg = ERR_PTR(-EINTR);
	else if (!time_left) {
		if (crypto_is_test_larval(larval))
			crypto_larval_kill(larval);
		alg = ERR_PTR(-ETIMEDOUT);
	} else if (!alg) {
		alg = &larval->alg;
		alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
		      ERR_PTR(-EAGAIN);
	} else if (IS_ERR(alg))
		;
	else if (crypto_is_test_larval(larval) &&
		 !(alg->cra_flags & CRYPTO_ALG_TESTED))
		alg = ERR_PTR(-EAGAIN);
	else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)
		alg = ERR_PTR(-EAGAIN);
	else if (!crypto_mod_get(alg))
		alg = ERR_PTR(-EAGAIN);
	crypto_mod_put(&larval->alg);

	if (!IS_ERR(alg) && crypto_is_larval(alg))
		goto again;

	return alg;
}

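/*
 * Look up an algorithm, by default insisting on CRYPTO_ALG_TESTED and
 * masking CRYPTO_ALG_FIPS_INTERNAL out of the initial search.  If only
 * an untested instance exists, fail with -ELIBBAD to signal a failed
 * self-test; a match whose CRYPTO_ALG_FIPS_INTERNAL flag does not
 * agree with the caller's request is rejected with -ENOENT.
 */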
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
					    u32 mask)
{
	const u32 fips = CRYPTO_ALG_FIPS_INTERNAL;
	struct crypto_alg *alg;
	u32 test = 0;

	if (!((type | mask) & CRYPTO_ALG_TESTED))
		test |= CRYPTO_ALG_TESTED;

	down_read(&crypto_alg_sem);
	alg = __crypto_alg_lookup(name, (type | test) & ~fips,
				  (mask | test) & ~fips);
	if (alg) {
		if (((type | mask) ^ fips) & fips)
			mask |= fips;
		mask &= fips;

		if (!crypto_is_larval(alg) &&
		    ((type ^ alg->cra_flags) & mask)) {
			/* Algorithm is disallowed in FIPS mode. */
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
	} else if (test) {
		alg = __crypto_alg_lookup(name, type, mask);
		if (alg && !crypto_is_larval(alg)) {
			/* Test failed */
			crypto_mod_put(alg);
			alg = ERR_PTR(-ELIBBAD);
		}
	}
	up_read(&crypto_alg_sem);

	return alg;
}

static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
					       u32 mask)
{
	struct crypto_alg *alg;

	if (!name)
		return ERR_PTR(-ENOENT);

	type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);

	alg = crypto_alg_lookup(name, type, mask);
	if (!alg && !(mask & CRYPTO_NOLOAD)) {
		request_module("crypto-%s", name);

		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
		      CRYPTO_ALG_NEED_FALLBACK))
			request_module("crypto-%s-all", name);

		alg = crypto_alg_lookup(name, type, mask);
	}

	if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
		alg = crypto_larval_wait(alg, type, mask);
	else if (alg)
		;
	else if (!(mask & CRYPTO_ALG_TESTED))
		alg = crypto_larval_add(name, type, mask);
	else
		alg = ERR_PTR(-ENOENT);

	return alg;
}

int crypto_probing_notify(unsigned long val, void *v)
{
	int ok;

	ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	if (ok == NOTIFY_DONE) {
		request_module("cryptomgr");
		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
	}

	return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);

struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	/*
	 * If the internal flag is set for a cipher, require a caller to
	 * invoke the cipher with the internal flag to use that cipher.
	 * Also, if a caller wants to allocate a cipher that may or may
	 * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
	 * !(mask & CRYPTO_ALG_INTERNAL).
	 */
	if (!((type | mask) & CRYPTO_ALG_INTERNAL))
		mask |= CRYPTO_ALG_INTERNAL;

	larval = crypto_larval_lookup(name, type, mask);
	if (IS_ERR(larval) || !crypto_is_larval(larval))
		return larval;

	ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);

	if (ok == NOTIFY_STOP)
		alg = crypto_larval_wait(larval, type, mask);
	else {
		crypto_mod_put(larval);
		alg = ERR_PTR(-ENOENT);
	}
	crypto_larval_kill(container_of(larval, struct crypto_larval, alg));
	return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
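
/*
 * Example (editor's sketch, not part of the original source): the
 * three ways a caller can treat CRYPTO_ALG_INTERNAL per the comment
 * above:
 *
 *	crypto_alg_mod_lookup(name, 0, 0);
 *		only non-internal algorithms can match
 *
 *	crypto_alg_mod_lookup(name, CRYPTO_ALG_INTERNAL,
 *			      CRYPTO_ALG_INTERNAL);
 *		only an internal algorithm can match
 *
 *	crypto_alg_mod_lookup(name, CRYPTO_ALG_INTERNAL, 0);
 *		either may match
 */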

static void crypto_exit_ops(struct crypto_tfm *tfm)
{
	const struct crypto_type *type = tfm->__crt_alg->cra_type;

	if (type && tfm->exit)
		tfm->exit(tfm);
}

static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
	const struct crypto_type *type_obj = alg->cra_type;
	unsigned int len;

	len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	if (type_obj)
		return len + type_obj->ctxsize(alg, type, mask);

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		BUG();

	case CRYPTO_ALG_TYPE_CIPHER:
		len += crypto_cipher_ctxsize(alg);
		break;
	}

	return len;
}

void crypto_shoot_alg(struct crypto_alg *alg)
{
	down_write(&crypto_alg_sem);
	alg->cra_flags |= CRYPTO_ALG_DYING;
	up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);

struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
					 u32 mask, gfp_t gfp)
{
	struct crypto_tfm *tfm;
	unsigned int tfm_size;
	int err = -ENOMEM;

	tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
	tfm = kzalloc(tfm_size, gfp);
	if (tfm == NULL)
		goto out_err;

	tfm->__crt_alg = alg;
	refcount_set(&tfm->refcnt, 1);

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(tfm);
out_err:
	tfm = ERR_PTR(err);
out:
	return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
				      u32 mask)
{
	return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);

/*
 *	crypto_alloc_base - Locate algorithm and allocate transform
 *	@alg_name: Name of algorithm
 *	@type: Type of algorithm
 *	@mask: Mask for type comparison
 *
 *	This function should not be used by new algorithm types.
 *	Please use crypto_alloc_tfm() instead.
 *
 *	crypto_alloc_base() will first attempt to locate an already loaded
 *	algorithm.  If that fails and the kernel supports dynamically loadable
 *	modules, it will then attempt to load a module of the same name or
 *	alias.  If that fails it will send a query to any loaded crypto manager
 *	to construct an algorithm on the fly.  A refcount is grabbed on the
 *	algorithm which is then associated with the new transform.
 *
 *	The returned transform is of an indeterminate type.  Most people
 *	should use one of the more specific allocation functions such as
 *	crypto_alloc_skcipher().
 *
 *	In case of error the return value is an error pointer.
 */
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_alg_mod_lookup(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
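
/*
 * Example (editor's sketch, not part of the original source): a
 * legacy caller allocates a single-block cipher and releases it with
 * the crypto_free_tfm() wrapper from <linux/crypto.h>:
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
 *				CRYPTO_ALG_TYPE_MASK);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */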

static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
				 const struct crypto_type *frontend, int node,
				 gfp_t gfp)
{
	struct crypto_tfm *tfm;
	unsigned int tfmsize;
	unsigned int total;
	char *mem;

	tfmsize = frontend->tfmsize;
	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

	mem = kzalloc_node(total, gfp, node);
	if (mem == NULL)
		return ERR_PTR(-ENOMEM);

	tfm = (struct crypto_tfm *)(mem + tfmsize);
	tfm->__crt_alg = alg;
	tfm->node = node;
	refcount_set(&tfm->refcnt, 1);

	return mem;
}

void *crypto_create_tfm_node(struct crypto_alg *alg,
			     const struct crypto_type *frontend,
			     int node)
{
	struct crypto_tfm *tfm;
	char *mem;
	int err;

	mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
	if (IS_ERR(mem))
		goto out;

	tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
	tfm->fb = tfm;

	err = frontend->init_tfm(tfm);
	if (err)
		goto out_free_tfm;

	if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
		goto cra_init_failed;

	goto out;

cra_init_failed:
	crypto_exit_ops(tfm);
out_free_tfm:
	if (err == -EAGAIN)
		crypto_shoot_alg(alg);
	kfree(mem);
	mem = ERR_PTR(err);
out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);

void *crypto_clone_tfm(const struct crypto_type *frontend,
		       struct crypto_tfm *otfm)
{
	struct crypto_alg *alg = otfm->__crt_alg;
	struct crypto_tfm *tfm;
	char *mem;

	mem = ERR_PTR(-ESTALE);
	if (unlikely(!crypto_mod_get(alg)))
		goto out;

	mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
	if (IS_ERR(mem)) {
		crypto_mod_put(alg);
		goto out;
	}

	tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
	tfm->crt_flags = otfm->crt_flags;
	tfm->fb = tfm;

out:
	return mem;
}
EXPORT_SYMBOL_GPL(crypto_clone_tfm);

struct crypto_alg *crypto_find_alg(const char *alg_name,
				   const struct crypto_type *frontend,
				   u32 type, u32 mask)
{
	if (frontend) {
		type &= frontend->maskclear;
		mask &= frontend->maskclear;
		type |= frontend->type;
		mask |= frontend->maskset;
	}

	return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);

/*
 *	crypto_alloc_tfm_node - Locate algorithm and allocate transform
 *	@alg_name: Name of algorithm
 *	@frontend: Frontend algorithm type
 *	@type: Type of algorithm
 *	@mask: Mask for type comparison
 *	@node: NUMA node on which requests should be allocated; if @node is
 *		NUMA_NO_NODE, the caller has no special requirement.
 *
 *	crypto_alloc_tfm_node() will first attempt to locate an already loaded
 *	algorithm.  If that fails and the kernel supports dynamically loadable
 *	modules, it will then attempt to load a module of the same name or
 *	alias.  If that fails it will send a query to any loaded crypto manager
 *	to construct an algorithm on the fly.  A refcount is grabbed on the
 *	algorithm which is then associated with the new transform.
 *
 *	The returned transform is of an indeterminate type.  Most people
 *	should use one of the more specific allocation functions such as
 *	crypto_alloc_skcipher().
 *
 *	In case of error the return value is an error pointer.
 */
void *crypto_alloc_tfm_node(const char *alg_name,
		       const struct crypto_type *frontend, u32 type, u32 mask,
		       int node)
{
	void *tfm;
	int err;

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_find_alg(alg_name, frontend, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = crypto_create_tfm_node(alg, frontend, node);
		if (!IS_ERR(tfm))
			return tfm;

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);
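
/*
 * Example (editor's sketch, not part of the original source): the
 * type-safe front ends are thin wrappers around this helper, modeled
 * here on the way crypto_alloc_skcipher() is defined in
 * crypto/skcipher.c:
 *
 *	struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
 *						      u32 type, u32 mask)
 *	{
 *		return crypto_alloc_tfm(alg_name, &crypto_skcipher_type,
 *					type, mask);
 *	}
 *
 * where crypto_alloc_tfm() is the NUMA_NO_NODE shorthand for
 * crypto_alloc_tfm_node().
 */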

/*
 *	crypto_destroy_tfm - Free crypto transform
 *	@mem: Start of tfm slab
 *	@tfm: Transform to free
 *
 *	This function frees up the transform and any associated resources,
 *	then drops the refcount on the associated algorithm.
 */
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
	struct crypto_alg *alg;

	if (IS_ERR_OR_NULL(mem))
		return;

	if (!refcount_dec_and_test(&tfm->refcnt))
		return;
	alg = tfm->__crt_alg;

	if (!tfm->exit && alg->cra_exit)
		alg->cra_exit(tfm);
	crypto_exit_ops(tfm);
	crypto_mod_put(alg);
	kfree_sensitive(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
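
/*
 * Editor's note: this is normally reached through a front-end wrapper:
 * crypto_free_tfm() passes the tfm itself as @mem, while wrappers such
 * as crypto_free_skcipher() pass the start of the enclosing front-end
 * object that was returned by crypto_create_tfm_node().
 */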

int crypto_has_alg(const char *name, u32 type, u32 mask)
{
	int ret = 0;
	struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);

	if (!IS_ERR(alg)) {
		crypto_mod_put(alg);
		ret = 1;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
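
/*
 * Example (editor's sketch, not part of the original source): probing
 * for an algorithm without keeping a transform around:
 *
 *	if (!crypto_has_alg("gcm(aes)", 0, 0))
 *		pr_warn("gcm(aes) is not available\n");
 */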

void crypto_req_done(void *data, int err)
{
	struct crypto_wait *wait = data;

	if (err == -EINPROGRESS)
		return;

	wait->err = err;
	complete(&wait->completion);
}
EXPORT_SYMBOL_GPL(crypto_req_done);
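
/*
 * Example (editor's sketch, not part of the original source): the
 * usual synchronous-wait pattern built on crypto_req_done(), using
 * DECLARE_CRYPTO_WAIT() and crypto_wait_req() from <linux/crypto.h>:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */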

void crypto_destroy_alg(struct crypto_alg *alg)
{
	if (alg->cra_type && alg->cra_type->destroy)
		alg->cra_type->destroy(alg);
	if (alg->cra_destroy)
		alg->cra_destroy(alg);
}
EXPORT_SYMBOL_GPL(crypto_destroy_alg);

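/*
 * Duplicate @req (of @total bytes) with @gfp.  If the allocation
 * fails, the original request is redirected to the transform's
 * pre-allocated fallback (tfm->fb) and returned unchanged, so the
 * caller always gets back a request it can use.  A successful clone
 * drops CRYPTO_TFM_REQ_ON_STACK, since the copy lives on the heap.
 */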
struct crypto_async_request *crypto_request_clone(
	struct crypto_async_request *req, size_t total, gfp_t gfp)
{
	struct crypto_tfm *tfm = req->tfm;
	struct crypto_async_request *nreq;

	nreq = kmemdup(req, total, gfp);
	if (!nreq) {
		req->tfm = tfm->fb;
		return req;
	}

	nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
	return nreq;
}
EXPORT_SYMBOL_GPL(crypto_request_clone);

MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");