xref: /linux/crypto/ahash.c (revision cbc9f5bcfa72d7e58cbace18c97e4df7e3a73058)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Asynchronous Cryptographic Hash operations.
4  *
5  * This is the implementation of the ahash (asynchronous hash) API.  It differs
6  * from shash (synchronous hash) in that ahash supports asynchronous operations,
7  * and it hashes data from scatterlists instead of virtually addressed buffers.
8  *
9  * The ahash API provides access to both ahash and shash algorithms.  The shash
10  * API only provides access to shash algorithms.
11  *
12  * Copyright (c) 2008 Loc Ho <lho@amcc.com>
13  */
14 
15 #include <crypto/scatterwalk.h>
16 #include <linux/cryptouser.h>
17 #include <linux/err.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/module.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/seq_file.h>
24 #include <linux/string.h>
25 #include <linux/string_choices.h>
26 #include <net/netlink.h>
27 
28 #include "hash.h"
29 
/* Type mask used when matching ahash (and shash-backed) algorithms. */
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
31 
/*
 * State for walking the source data of a hash request (scatterlist or
 * virtual buffer) one mapped chunk at a time.
 */
struct crypto_hash_walk {
	const char *data;	/* currently mapped chunk (kmap_local) */

	unsigned int offset;	/* offset into the current page */
	unsigned int flags;	/* request flags (e.g. CRYPTO_AHASH_REQ_VIRT) */

	struct page *pg;	/* page currently being walked */
	unsigned int entrylen;	/* bytes left in the current sg entry */

	unsigned int total;	/* bytes left in the remaining sg entries */
	struct scatterlist *sg;	/* current scatterlist entry */
};
44 
/*
 * Saved request state used while a request is internally re-purposed:
 * for the virtual-address bounce-page update path and for the default
 * finup/digest fallbacks.
 */
struct ahash_save_req_state {
	struct ahash_request *req0;	/* the original (outer) request */
	crypto_completion_t compl;	/* caller's completion callback */
	void *data;			/* caller's completion data */
	struct scatterlist sg;		/* one-entry sg wrapping ->page */
	const u8 *src;			/* original virtual source buffer */
	u8 *page;			/* bounce page for virt updates */
	unsigned int offset;		/* bytes of ->src consumed so far */
	unsigned int nbytes;		/* total length of ->src */
	bool update;			/* chained operation is an update */
};
56 
/* Forward declarations for the default finup/digest fallback machinery. */
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
static void ahash_restore_req(struct ahash_request *req);
static void ahash_def_finup_done1(void *data, int err);
static int ahash_def_finup_finish1(struct ahash_request *req, int err);
static int ahash_def_finup(struct ahash_request *req);
62 
63 static int hash_walk_next(struct crypto_hash_walk *walk)
64 {
65 	unsigned int offset = walk->offset;
66 	unsigned int nbytes = min(walk->entrylen,
67 				  ((unsigned int)(PAGE_SIZE)) - offset);
68 
69 	walk->data = kmap_local_page(walk->pg);
70 	walk->data += offset;
71 	walk->entrylen -= nbytes;
72 	return nbytes;
73 }
74 
/*
 * Advance the walk to the scatterlist entry in walk->sg: locate the
 * page and in-page offset of its data, clamp the entry length to the
 * remaining total, then map the first chunk.
 */
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	/* sg->offset may exceed PAGE_SIZE; step to the page containing it */
	walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}
91 
/*
 * Begin walking the data of an ahash request.  Returns the size of the
 * first mapped chunk, 0 if the request is empty, or a negative error.
 * A virtual-address request is returned as one single chunk.
 */
static int crypto_hash_walk_first(struct ahash_request *req,
				  struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;
	walk->entrylen = 0;

	if (!walk->total)
		return 0;

	walk->flags = req->base.flags;

	if (ahash_request_isvirt(req)) {
		/* Virtually contiguous: hand back everything at once. */
		walk->data = req->svirt;
		walk->total = 0;
		return req->nbytes;
	}

	walk->sg = req->src;

	return hash_walk_new_entry(walk);
}
113 
/*
 * Finish the current chunk and advance the walk.  @err is the result
 * of processing the chunk; it is passed through unchanged on failure.
 * Returns the size of the next mapped chunk, 0 when the walk is done,
 * or a negative error.
 */
static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	/* Virtual-address walks have no mapping to undo. */
	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
		return err;

	/* Rewind to the mapping base before unmapping. */
	walk->data -= walk->offset;

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		/* More data left in this sg entry: continue on the next page. */
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
140 
141 static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
142 {
143 	return !(walk->entrylen | walk->total);
144 }
145 
/*
 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
	struct crypto_shash **ctx = crypto_ahash_ctx(tfm);

	return *ctx;
}
154 
155 static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
156 						    struct crypto_ahash *tfm)
157 {
158 	struct shash_desc *desc = ahash_request_ctx(req);
159 
160 	desc->tfm = ahash_to_shash(tfm);
161 	return desc;
162 }
163 
164 int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
165 {
166 	struct crypto_hash_walk walk;
167 	int nbytes;
168 
169 	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
170 	     nbytes = crypto_hash_walk_done(&walk, nbytes))
171 		nbytes = crypto_shash_update(desc, walk.data, nbytes);
172 
173 	return nbytes;
174 }
175 EXPORT_SYMBOL_GPL(shash_ahash_update);
176 
/*
 * Complete an ahash request via shash: hash any remaining data and
 * write the digest to req->result.  The last chunk of the walk is
 * folded into a single finup call; earlier chunks go through update.
 */
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	nbytes = crypto_hash_walk_first(req, &walk);
	if (!nbytes)
		return crypto_shash_final(desc, req->result);

	do {
		nbytes = crypto_hash_walk_last(&walk) ?
			 crypto_shash_finup(desc, walk.data, nbytes,
					    req->result) :
			 crypto_shash_update(desc, walk.data, nbytes);
		nbytes = crypto_hash_walk_done(&walk, nbytes);
	} while (nbytes > 0);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);
197 
/*
 * One-shot digest via shash.  Fast paths: empty or virtually-addressed
 * data, and data wholly contained in the first sg entry without
 * crossing a page, are hashed with a single crypto_shash_digest();
 * everything else falls back to init + finup over the full walk.
 */
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
	unsigned int nbytes = req->nbytes;
	struct scatterlist *sg;
	unsigned int offset;
	struct page *page;
	const u8 *data;
	int err;

	data = req->svirt;
	if (!nbytes || ahash_request_isvirt(req))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	sg = req->src;
	if (nbytes > sg->length)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	page = sg_page(sg);
	offset = sg->offset;
	/* Direct address; only usable when HIGHMEM is disabled (below). */
	data = lowmem_page_address(page) + offset;
	if (!IS_ENABLED(CONFIG_HIGHMEM))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	/* HIGHMEM: locate the exact page and only map if we stay within it. */
	page = nth_page(page, offset >> PAGE_SHIFT);
	offset = offset_in_page(offset);

	if (nbytes > (unsigned int)PAGE_SIZE - offset)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	data = kmap_local_page(page);
	err = crypto_shash_digest(desc, data + offset, nbytes,
				  req->result);
	kunmap_local(data);
	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);
236 
/* tfm exit handler for ahash tfms backed by an shash algorithm. */
static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_shash *shash = *(struct crypto_shash **)crypto_tfm_ctx(tfm);

	crypto_free_shash(shash);
}
243 
244 static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
245 {
246 	struct crypto_alg *calg = tfm->__crt_alg;
247 	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
248 	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
249 	struct crypto_shash *shash;
250 
251 	if (!crypto_mod_get(calg))
252 		return -EAGAIN;
253 
254 	shash = crypto_create_tfm(calg, &crypto_shash_type);
255 	if (IS_ERR(shash)) {
256 		crypto_mod_put(calg);
257 		return PTR_ERR(shash);
258 	}
259 
260 	crt->using_shash = true;
261 	*ctx = shash;
262 	tfm->exit = crypto_exit_ahash_using_shash;
263 
264 	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
265 				    CRYPTO_TFM_NEED_KEY);
266 	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
267 
268 	return 0;
269 }
270 
/*
 * Default ->setkey for keyless algorithms.  Also acts as a sentinel:
 * crypto_hash_alg_has_setkey() compares against this function.
 */
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}
276 
277 static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
278 {
279 	if (alg->setkey != ahash_nosetkey &&
280 	    !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
281 		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
282 }
283 
284 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
285 			unsigned int keylen)
286 {
287 	if (likely(tfm->using_shash)) {
288 		struct crypto_shash *shash = ahash_to_shash(tfm);
289 		int err;
290 
291 		err = crypto_shash_setkey(shash, key, keylen);
292 		if (unlikely(err)) {
293 			crypto_ahash_set_flags(tfm,
294 					       crypto_shash_get_flags(shash) &
295 					       CRYPTO_TFM_NEED_KEY);
296 			return err;
297 		}
298 	} else {
299 		struct ahash_alg *alg = crypto_ahash_alg(tfm);
300 		int err;
301 
302 		err = alg->setkey(tfm, key, keylen);
303 		if (unlikely(err)) {
304 			ahash_set_needkey(tfm, alg);
305 			return err;
306 		}
307 	}
308 	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
309 	return 0;
310 }
311 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
312 
/*
 * Continue a bounced virtual-address update: copy successive chunks of
 * the saved source buffer into the bounce page and resubmit the update
 * until the whole buffer has been consumed, then restore the request's
 * original virtual src/result.  Returns the final status, or
 * -EINPROGRESS/-EBUSY if the driver went asynchronous.  (@mask is
 * currently unused here.)
 */
static int ahash_reqchain_virt(struct ahash_save_req_state *state,
			       int err, u32 mask)
{
	struct ahash_request *req = state->req0;
	struct crypto_ahash *tfm;

	tfm = crypto_ahash_reqtfm(req);

	for (;;) {
		unsigned len = state->nbytes;

		/* offset == 0: not a bounced update, nothing to continue. */
		if (!state->offset)
			break;

		if (state->offset == len || err) {
			u8 *result = req->result;

			/* Done (or failed): put the original buffer back. */
			ahash_request_set_virt(req, state->src, result, len);
			state->offset = 0;
			break;
		}

		/* Copy the next (at most page-sized) chunk and update. */
		len -= state->offset;

		len = min(PAGE_SIZE, len);
		memcpy(state->page, state->src + state->offset, len);
		state->offset += len;
		req->nbytes = len;

		err = crypto_ahash_alg(tfm)->update(req);
		if (err == -EINPROGRESS) {
			/* Report -EBUSY while further chunks remain queued. */
			if (state->offset < state->nbytes)
				err = -EBUSY;
			break;
		}

		if (err == -EBUSY)
			break;
	}

	return err;
}
355 
/*
 * Drive a bounced update to completion.  Unless the operation is still
 * in flight, zeroise and free the bounce page (it held caller data)
 * and restore the request's saved completion state.
 */
static int ahash_reqchain_finish(struct ahash_request *req0,
				 struct ahash_save_req_state *state,
				 int err, u32 mask)
{
	u8 *page;

	err = ahash_reqchain_virt(state, err, mask);
	if (err == -EINPROGRESS || err == -EBUSY)
		goto out;

	page = state->page;
	if (page) {
		/* Clear potentially sensitive data before freeing. */
		memset(page, 0, PAGE_SIZE);
		free_page((unsigned long)page);
	}
	ahash_restore_req(req0);

out:
	return err;
}
376 
/*
 * Completion callback installed by ahash_do_req_chain() on async tfms:
 * continues the bounced update and only notifies the caller's original
 * completion handler once the whole request has finished.
 */
static void ahash_reqchain_done(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		/* Backlog notification; suppress it while chunks remain. */
		if (state->offset < state->nbytes)
			return;
		goto notify;
	}

	err = ahash_reqchain_finish(state->req0, state, err,
				    CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}
398 
399 static int ahash_do_req_chain(struct ahash_request *req,
400 			      int (*op)(struct ahash_request *req))
401 {
402 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
403 	bool update = op == crypto_ahash_alg(tfm)->update;
404 	struct ahash_save_req_state *state;
405 	struct ahash_save_req_state state0;
406 	u8 *page = NULL;
407 	int err;
408 
409 	if (crypto_ahash_req_chain(tfm) ||
410 	    !update || !ahash_request_isvirt(req))
411 		return op(req);
412 
413 	if (update && ahash_request_isvirt(req)) {
414 		page = (void *)__get_free_page(GFP_ATOMIC);
415 		err = -ENOMEM;
416 		if (!page)
417 			goto out;
418 	}
419 
420 	state = &state0;
421 	if (ahash_is_async(tfm)) {
422 		err = ahash_save_req(req, ahash_reqchain_done);
423 		if (err)
424 			goto out_free_page;
425 
426 		state = req->base.data;
427 	}
428 
429 	state->update = update;
430 	state->page = page;
431 	state->offset = 0;
432 	state->nbytes = 0;
433 
434 	if (page)
435 		sg_init_one(&state->sg, page, PAGE_SIZE);
436 
437 	if (update && ahash_request_isvirt(req) && req->nbytes) {
438 		unsigned len = req->nbytes;
439 		u8 *result = req->result;
440 
441 		state->src = req->svirt;
442 		state->nbytes = len;
443 
444 		len = min(PAGE_SIZE, len);
445 
446 		memcpy(page, req->svirt, len);
447 		state->offset = len;
448 
449 		ahash_request_set_crypt(req, &state->sg, result, len);
450 	}
451 
452 	err = op(req);
453 	if (err == -EINPROGRESS || err == -EBUSY) {
454 		if (state->offset < state->nbytes)
455 			err = -EBUSY;
456 		return err;
457 	}
458 
459 	return ahash_reqchain_finish(req, state, err, ~0);
460 
461 out_free_page:
462 	free_page((unsigned long)page);
463 
464 out:
465 	return err;
466 }
467 
468 int crypto_ahash_init(struct ahash_request *req)
469 {
470 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
471 
472 	if (likely(tfm->using_shash))
473 		return crypto_shash_init(prepare_shash_desc(req, tfm));
474 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
475 		return -ENOKEY;
476 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
477 }
478 EXPORT_SYMBOL_GPL(crypto_ahash_init);
479 
480 static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
481 {
482 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
483 	struct ahash_save_req_state *state;
484 
485 	if (!ahash_is_async(tfm))
486 		return 0;
487 
488 	state = kmalloc(sizeof(*state), GFP_ATOMIC);
489 	if (!state)
490 		return -ENOMEM;
491 
492 	state->compl = req->base.complete;
493 	state->data = req->base.data;
494 	req->base.complete = cplt;
495 	req->base.data = state;
496 	state->req0 = req;
497 
498 	return 0;
499 }
500 
501 static void ahash_restore_req(struct ahash_request *req)
502 {
503 	struct ahash_save_req_state *state;
504 	struct crypto_ahash *tfm;
505 
506 	tfm = crypto_ahash_reqtfm(req);
507 	if (!ahash_is_async(tfm))
508 		return;
509 
510 	state = req->base.data;
511 
512 	req->base.complete = state->compl;
513 	req->base.data = state->data;
514 	kfree(state);
515 }
516 
517 int crypto_ahash_update(struct ahash_request *req)
518 {
519 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
520 
521 	if (likely(tfm->using_shash))
522 		return shash_ahash_update(req, ahash_request_ctx(req));
523 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
524 }
525 EXPORT_SYMBOL_GPL(crypto_ahash_update);
526 
527 int crypto_ahash_final(struct ahash_request *req)
528 {
529 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
530 
531 	if (likely(tfm->using_shash))
532 		return crypto_shash_final(ahash_request_ctx(req), req->result);
533 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
534 }
535 EXPORT_SYMBOL_GPL(crypto_ahash_final);
536 
537 int crypto_ahash_finup(struct ahash_request *req)
538 {
539 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
540 
541 	if (likely(tfm->using_shash))
542 		return shash_ahash_finup(req, ahash_request_ctx(req));
543 	if (!crypto_ahash_alg(tfm)->finup ||
544 	    (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req)))
545 		return ahash_def_finup(req);
546 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
547 }
548 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
549 
/*
 * Second step of the default digest fallback: after init completes,
 * feed the data via update and hand off to the finup finishing steps.
 * On error the saved request state is restored before returning.
 */
static int ahash_def_digest_finish(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done1;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);

out:
	ahash_restore_req(req);
	return err;
}
571 
/*
 * Async completion for the init step of the default digest fallback.
 * The saved state is copied to the stack first because
 * ahash_def_digest_finish() may free it via ahash_restore_req().
 */
static void ahash_def_digest_done(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	/* NOTE(review): completion may run in atomic context, so the
	 * remaining steps must not sleep — clear MAY_SLEEP. */
	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_digest_finish(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}
592 
/* Default ->digest fallback: init + update + final via generic helpers. */
static int ahash_def_digest(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_digest_done);
	if (err)
		return err;

	err = crypto_ahash_init(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_digest_finish(req, err);
}
607 
608 int crypto_ahash_digest(struct ahash_request *req)
609 {
610 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
611 
612 	if (likely(tfm->using_shash))
613 		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
614 	if (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req))
615 		return ahash_def_digest(req);
616 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
617 		return -ENOKEY;
618 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
619 }
620 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
621 
/*
 * Async completion for the final step of the default finup fallback:
 * restore the saved request state and complete the original request.
 */
static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	struct ahash_request *areq = state->req0;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq);
	ahash_request_complete(areq, err);
}
633 
/*
 * Second step of the default finup fallback: after update completes,
 * run final.  The saved request state is restored once the operation
 * finishes (or fails synchronously).
 */
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req);
	return err;
}
653 
/*
 * Async completion for the update step of the default finup fallback.
 * The saved state is copied to the stack first because
 * ahash_def_finup_finish1() may free it via ahash_restore_req().
 */
static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	/* NOTE(review): completion may run in atomic context, so the
	 * remaining steps must not sleep — clear MAY_SLEEP. */
	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}
674 
/*
 * Default ->finup implementation for drivers that lack one (or cannot
 * take virtual addresses): update followed by final.
 */
static int ahash_def_finup(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}
689 
690 int crypto_ahash_export(struct ahash_request *req, void *out)
691 {
692 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
693 
694 	if (likely(tfm->using_shash))
695 		return crypto_shash_export(ahash_request_ctx(req), out);
696 	return crypto_ahash_alg(tfm)->export(req, out);
697 }
698 EXPORT_SYMBOL_GPL(crypto_ahash_export);
699 
700 int crypto_ahash_import(struct ahash_request *req, const void *in)
701 {
702 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
703 
704 	if (likely(tfm->using_shash))
705 		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
706 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
707 		return -ENOKEY;
708 	return crypto_ahash_alg(tfm)->import(req, in);
709 }
710 EXPORT_SYMBOL_GPL(crypto_ahash_import);
711 
712 static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
713 {
714 	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
715 	struct ahash_alg *alg = crypto_ahash_alg(hash);
716 
717 	alg->exit_tfm(hash);
718 }
719 
/*
 * tfm constructor: size the state and request context, then either set
 * up the shash-backed glue or run the algorithm's own init hook.
 */
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	crypto_ahash_set_statesize(hash, alg->halg.statesize);
	crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));

	/* shash-backed tfms get their own context and exit handling. */
	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
		return crypto_init_ahash_using_shash(tfm);

	ahash_set_needkey(hash, alg);

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}
738 
739 static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
740 {
741 	if (alg->cra_type == &crypto_shash_type)
742 		return sizeof(struct crypto_shash *);
743 
744 	return crypto_alg_extsize(alg);
745 }
746 
747 static void crypto_ahash_free_instance(struct crypto_instance *inst)
748 {
749 	struct ahash_instance *ahash = ahash_instance(inst);
750 
751 	ahash->free(ahash);
752 }
753 
/* Report algorithm details to userspace via the crypto_user netlink API. */
static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	/* Zero the whole struct (padding included) before it is copied out. */
	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
768 
/* /proc/crypto output for ahash algorithms. */
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}
780 
/* Frontend glue tying ahash tfms/instances into the crypto core. */
static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
796 
/* Grab a reference to a named ahash algorithm for use by an instance. */
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);
805 
/*
 * Allocate an ahash tfm handle for the named algorithm.  Returns the
 * handle on success or an ERR_PTR() on failure.
 */
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
812 
/* Report whether an ahash algorithm with the given name is available. */
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
818 
819 static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
820 {
821 	struct crypto_alg *alg = &halg->base;
822 
823 	if (alg->cra_type == &crypto_shash_type)
824 		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
825 
826 	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
827 }
828 
/*
 * Clone an ahash tfm.  Keyless tfms can simply share the original via
 * a reference; keyed tfms get a fresh handle with the (possibly keyed)
 * context duplicated through crypto_clone_shash() or ->clone_tfm.
 * Returns the clone or an ERR_PTR().
 */
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		/* No key state: the original can be shared by reference. */
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (likely(hash->using_shash)) {
		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
		struct crypto_shash *shash;

		shash = crypto_clone_shash(ahash_to_shash(hash));
		if (IS_ERR(shash)) {
			err = PTR_ERR(shash);
			goto out_free_nhash;
		}
		nhash->using_shash = true;
		*nctx = shash;
		return nhash;
	}

	/* Keyed non-shash tfms must supply ->clone_tfm to be clonable. */
	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
883 
/*
 * Common validation and setup before registering an ahash algorithm:
 * require a nonzero statesize, reject a declared reqsize smaller than
 * the statesize, install the type glue and default ->setkey to the
 * keyless stub.
 */
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	if (!alg->setkey)
		alg->setkey = ahash_nosetkey;

	return 0;
}
907 
908 int crypto_register_ahash(struct ahash_alg *alg)
909 {
910 	struct crypto_alg *base = &alg->halg.base;
911 	int err;
912 
913 	err = ahash_prepare_alg(alg);
914 	if (err)
915 		return err;
916 
917 	return crypto_register_alg(base);
918 }
919 EXPORT_SYMBOL_GPL(crypto_register_ahash);
920 
/* Unregister an algorithm registered with crypto_register_ahash(). */
void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
926 
927 int crypto_register_ahashes(struct ahash_alg *algs, int count)
928 {
929 	int i, ret;
930 
931 	for (i = 0; i < count; i++) {
932 		ret = crypto_register_ahash(&algs[i]);
933 		if (ret)
934 			goto err;
935 	}
936 
937 	return 0;
938 
939 err:
940 	for (--i; i >= 0; --i)
941 		crypto_unregister_ahash(&algs[i]);
942 
943 	return ret;
944 }
945 EXPORT_SYMBOL_GPL(crypto_register_ahashes);
946 
947 void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
948 {
949 	int i;
950 
951 	for (i = count - 1; i >= 0; --i)
952 		crypto_unregister_ahash(&algs[i]);
953 }
954 EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
955 
956 int ahash_register_instance(struct crypto_template *tmpl,
957 			    struct ahash_instance *inst)
958 {
959 	int err;
960 
961 	if (WARN_ON(!inst->free))
962 		return -EINVAL;
963 
964 	err = ahash_prepare_alg(&inst->alg);
965 	if (err)
966 		return err;
967 
968 	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
969 }
970 EXPORT_SYMBOL_GPL(ahash_register_instance);
971 
972 MODULE_LICENSE("GPL");
973 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
974