xref: /linux/crypto/ahash.c (revision fc4bd01d9ff592f620c499686245c093440db0e8)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the implementation of the ahash (asynchronous hash) API.  It differs
 * from shash (synchronous hash) in that ahash supports asynchronous operations
 * and can hash data from scatterlists as well as from virtually addressed
 * buffers (see CRYPTO_AHASH_REQ_VIRT below).
 *
 * The ahash API provides access to both ahash and shash algorithms; the shash
 * API provides access only to shash algorithms.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */
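
/*
 * A minimal usage sketch (an editor's illustration, not part of this file):
 * one-shot hashing of a caller-supplied buffer data/len through the ahash
 * API, assuming the "sha256" algorithm is available; error handling is
 * abbreviated.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */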

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e

struct crypto_hash_walk {
	const char *data;

	unsigned int offset;
	unsigned int flags;

	struct page *pg;
	unsigned int entrylen;

	unsigned int total;
	struct scatterlist *sg;
};

struct ahash_save_req_state {
	struct list_head head;
	struct ahash_request *req0;
	struct ahash_request *cur;
	int (*op)(struct ahash_request *req);
	crypto_completion_t compl;
	void *data;
	struct scatterlist sg;
	const u8 *src;
	u8 *page;
	unsigned int offset;
	unsigned int nbytes;
};

static void ahash_reqchain_done(void *data, int err);
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
static void ahash_restore_req(struct ahash_save_req_state *state);
static void ahash_def_finup_done1(void *data, int err);
static int ahash_def_finup(struct ahash_request *req);

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;
	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

static int crypto_hash_walk_first(struct ahash_request *req,
				  struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;
	walk->entrylen = 0;

	if (!walk->total)
		return 0;

	walk->flags = req->base.flags;

	if (ahash_request_isvirt(req)) {
		walk->data = req->svirt;
		walk->total = 0;
		return req->nbytes;
	}

	walk->sg = req->src;

	return hash_walk_new_entry(walk);
}

static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
		return err;

	walk->data -= walk->offset;

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}

static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
	return !(walk->entrylen | walk->total);
}
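
/*
 * The walk API above is consumed in a simple loop; shash_ahash_update()
 * below is the canonical user.  Schematically (a sketch, where process()
 * stands in for any hash step that returns 0 on success or a negative
 * error):
 *
 *	for (n = crypto_hash_walk_first(req, &walk); n > 0;
 *	     n = crypto_hash_walk_done(&walk, n))
 *		n = process(walk.data, n);
 */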

/*
 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
	return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
}

static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
						    struct crypto_ahash *tfm)
{
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = ahash_to_shash(tfm);
	return desc;
}

int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);

int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	nbytes = crypto_hash_walk_first(req, &walk);
	if (!nbytes)
		return crypto_shash_final(desc, req->result);

	do {
		nbytes = crypto_hash_walk_last(&walk) ?
			 crypto_shash_finup(desc, walk.data, nbytes,
					    req->result) :
			 crypto_shash_update(desc, walk.data, nbytes);
		nbytes = crypto_hash_walk_done(&walk, nbytes);
	} while (nbytes > 0);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
	unsigned int nbytes = req->nbytes;
	struct scatterlist *sg;
	unsigned int offset;
	int err;

	if (ahash_request_isvirt(req))
		return crypto_shash_digest(desc, req->svirt, nbytes,
					   req->result);

	if (nbytes &&
	    (sg = req->src, offset = sg->offset,
	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
		void *data;

		data = kmap_local_page(sg_page(sg));
		err = crypto_shash_digest(desc, data + offset, nbytes,
					  req->result);
		kunmap_local(data);
	} else
		err = crypto_shash_init(desc) ?:
		      shash_ahash_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);

static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(*ctx);
}

static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *shash;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	shash = crypto_create_tfm(calg, &crypto_shash_type);
	if (IS_ERR(shash)) {
		crypto_mod_put(calg);
		return PTR_ERR(shash);
	}

	crt->using_shash = true;
	*ctx = shash;
	tfm->exit = crypto_exit_ahash_using_shash;

	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
				    CRYPTO_TFM_NEED_KEY);
	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

	return 0;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
	if (alg->setkey != ahash_nosetkey &&
	    !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	if (likely(tfm->using_shash)) {
		struct crypto_shash *shash = ahash_to_shash(tfm);
		int err;

		err = crypto_shash_setkey(shash, key, keylen);
		if (unlikely(err)) {
			crypto_ahash_set_flags(tfm,
					       crypto_shash_get_flags(shash) &
					       CRYPTO_TFM_NEED_KEY);
			return err;
		}
	} else {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		int err;

		err = alg->setkey(tfm, key, keylen);
		if (unlikely(err)) {
			ahash_set_needkey(tfm, alg);
			return err;
		}
	}
	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
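
/*
 * Keyed algorithms such as "hmac(sha256)" must have their key set before
 * init/digest may be called; until then the tfm carries CRYPTO_TFM_NEED_KEY
 * and those entry points fail with -ENOKEY.  A short sketch, with req/wait
 * set up as in the sketch at the top of this file:
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */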

static bool ahash_request_hasvirt(struct ahash_request *req)
{
	struct ahash_request *r2;

	if (ahash_request_isvirt(req))
		return true;

	list_for_each_entry(r2, &req->base.list, base.list)
		if (ahash_request_isvirt(r2))
			return true;

	return false;
}

static int ahash_reqchain_virt(struct ahash_save_req_state *state,
			       int err, u32 mask)
{
	struct ahash_request *req = state->cur;

	for (;;) {
		unsigned len = state->nbytes;

		req->base.err = err;

		if (!state->offset)
			break;

		if (state->offset == len || err) {
			u8 *result = req->result;

			ahash_request_set_virt(req, state->src, result, len);
			state->offset = 0;
			break;
		}

		len -= state->offset;

		len = min(PAGE_SIZE, len);
		memcpy(state->page, state->src + state->offset, len);
		state->offset += len;
		req->nbytes = len;

		err = state->op(req);
		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
				err = -EBUSY;
			break;
		}

		if (err == -EBUSY)
			break;
	}

	return err;
}

static int ahash_reqchain_finish(struct ahash_save_req_state *state,
				 int err, u32 mask)
{
	struct ahash_request *req0 = state->req0;
	struct ahash_request *req = state->cur;
	struct crypto_ahash *tfm;
	struct ahash_request *n;
	bool update;

	err = ahash_reqchain_virt(state, err, mask);
	if (err == -EINPROGRESS || err == -EBUSY)
		goto out;

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	tfm = crypto_ahash_reqtfm(req);
	update = state->op == crypto_ahash_alg(tfm)->update;

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = ahash_reqchain_done;
		req->base.data = state;
		state->cur = req;

		if (update && ahash_request_isvirt(req) && req->nbytes) {
			unsigned len = req->nbytes;
			u8 *result = req->result;

			state->src = req->svirt;
			state->nbytes = len;

			len = min(PAGE_SIZE, len);

			memcpy(state->page, req->svirt, len);
			state->offset = len;

			ahash_request_set_crypt(req, &state->sg, result, len);
		}

		err = state->op(req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		err = ahash_reqchain_virt(state, err, mask);
		if (err == -EINPROGRESS || err == -EBUSY)
			goto out;

		list_add_tail(&req->base.list, &req0->base.list);
	}

	ahash_restore_req(state);

out:
	return err;
}

static void ahash_reqchain_done(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head) || state->offset < state->nbytes)
			return;
		goto notify;
	}

	err = ahash_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

static int ahash_do_req_chain(struct ahash_request *req,
			      int (*op)(struct ahash_request *req))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	bool update = op == crypto_ahash_alg(tfm)->update;
	struct ahash_save_req_state *state;
	struct ahash_save_req_state state0;
	struct ahash_request *r2;
	u8 *page = NULL;
	int err;

	if (crypto_ahash_req_chain(tfm) ||
	    (!ahash_request_chained(req) &&
	     (!update || !ahash_request_isvirt(req))))
		return op(req);

	if (update && ahash_request_hasvirt(req)) {
		gfp_t gfp;
		u32 flags;

		flags = ahash_request_flags(req);
		gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
		page = (void *)__get_free_page(gfp);
		err = -ENOMEM;
		if (!page)
			goto out_set_chain;
	}

	state = &state0;
	if (ahash_is_async(tfm)) {
		err = ahash_save_req(req, ahash_reqchain_done);
		if (err)
			goto out_free_page;

		state = req->base.data;
	}

	state->op = op;
	state->cur = req;
	state->page = page;
	state->offset = 0;
	state->nbytes = 0;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	if (page)
		sg_init_one(&state->sg, page, PAGE_SIZE);

	if (update && ahash_request_isvirt(req) && req->nbytes) {
		unsigned len = req->nbytes;
		u8 *result = req->result;

		state->src = req->svirt;
		state->nbytes = len;

		len = min(PAGE_SIZE, len);

		memcpy(page, req->svirt, len);
		state->offset = len;

		ahash_request_set_crypt(req, &state->sg, result, len);
	}

	err = op(req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return ahash_reqchain_finish(state, err, ~0);

out_free_page:
	if (page) {
		memset(page, 0, PAGE_SIZE);
		free_page((unsigned long)page);
	}

out_set_chain:
	req->base.err = err;
	list_for_each_entry(r2, &req->base.list, base.list)
		r2->base.err = err;

	return err;
}

int crypto_ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = crypto_shash_init(prepare_shash_desc(req, tfm));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = prepare_shash_desc(r2, tfm);
			r2->base.err = crypto_shash_init(desc);
		}

		return err;
	}

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ahash_save_req_state *state;
	gfp_t gfp;
	u32 flags;

	if (!ahash_is_async(tfm))
		return 0;

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	state = kmalloc(sizeof(*state), gfp);
	if (!state)
		return -ENOMEM;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;
	state->page = NULL;

	return 0;
}

static void ahash_restore_req(struct ahash_save_req_state *state)
{
	struct ahash_request *req = state->req0;
	struct crypto_ahash *tfm;

	free_page((unsigned long)state->page);

	tfm = crypto_ahash_reqtfm(req);
	if (!ahash_is_async(tfm))
		return;

	state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
	kfree(state);
}

int crypto_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = shash_ahash_update(req, ahash_request_ctx(req));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = ahash_request_ctx(r2);
			r2->base.err = shash_ahash_update(r2, desc);
		}

		return err;
	}

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = crypto_shash_final(ahash_request_ctx(req), req->result);
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = ahash_request_ctx(r2);
			r2->base.err = crypto_shash_final(desc, r2->result);
		}

		return err;
	}

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = shash_ahash_finup(req, ahash_request_ctx(req));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = ahash_request_ctx(r2);
			r2->base.err = shash_ahash_finup(r2, desc);
		}

		return err;
	}

	if (!crypto_ahash_alg(tfm)->finup ||
	    (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
		return ahash_def_finup(req);

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
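
/*
 * A sketch of the incremental pattern these entry points implement: one
 * init, any number of updates, then finup (or final) to produce the digest.
 * The wait/callback setup is as in the sketch at the top of this file;
 * sg1/sg2 are caller-supplied scatterlists.
 *
 *	err = crypto_wait_req(crypto_ahash_init(req), &wait);
 *	ahash_request_set_crypt(req, &sg1, NULL, len1);
 *	err = crypto_wait_req(crypto_ahash_update(req), &wait);
 *	ahash_request_set_crypt(req, &sg2, digest, len2);
 *	err = crypto_wait_req(crypto_ahash_finup(req), &wait);
 */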

static int ahash_def_digest_finish(struct ahash_save_req_state *state, int err)
{
	struct ahash_request *req = state->req0;
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done1;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(state);
	return err;
}

static void ahash_def_digest_done(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_digest_finish(state0, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

static int ahash_def_digest(struct ahash_request *req)
{
	struct ahash_save_req_state *state;
	int err;

	err = ahash_save_req(req, ahash_def_digest_done);
	if (err)
		return err;

	state = req->base.data;

	err = crypto_ahash_init(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_digest_finish(state, err);
}

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = prepare_shash_desc(r2, tfm);
			r2->base.err = shash_ahash_digest(r2, desc);
		}

		return err;
	}

	if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
		return ahash_def_digest(req);

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
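
/*
 * At this revision a request may carry a virtually addressed source
 * instead of a scatterlist (see ahash_request_isvirt() above).  A sketch
 * of the variant, reusing req/wait from the sketch at the top of the file:
 *
 *	ahash_request_set_virt(req, buf, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */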

static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	struct ahash_request *areq = state->req0;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(state);
	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_save_req_state *state, int err)
{
	struct ahash_request *req = state->req0;
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(state);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(state0, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct ahash_save_req_state *state;
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	state = req->base.data;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(state, err);
}

int crypto_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_export(ahash_request_ctx(req), out);
	return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

int crypto_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);
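
/*
 * export/import let a partial hash state be saved and resumed later,
 * possibly on a different request.  A sketch, with the buffer bounded by
 * crypto_ahash_statesize() (at most HASH_MAX_STATESIZE):
 *
 *	u8 state[HASH_MAX_STATESIZE];
 *
 *	err = crypto_ahash_export(req, state);
 *	...
 *	err = crypto_ahash_import(req2, state);
 *	err = crypto_wait_req(crypto_ahash_finup(req2), &wait);
 */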

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	crypto_ahash_set_statesize(hash, alg->halg.statesize);
	crypto_ahash_set_reqsize(hash, alg->reqsize);

	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
		return crypto_init_ahash_using_shash(tfm);

	ahash_set_needkey(hash, alg);

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_shash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
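
/*
 * Sketch: probing for an implementation before committing to allocate it.
 *
 *	if (!crypto_has_ahash("sha256", 0, 0))
 *		return -ENOENT;
 */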

static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type == &crypto_shash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (likely(hash->using_shash)) {
		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
		struct crypto_shash *shash;

		shash = crypto_clone_shash(ahash_to_shash(hash));
		if (IS_ERR(shash)) {
			err = PTR_ERR(shash);
			goto out_free_nhash;
		}
		nhash->using_shash = true;
		*nctx = shash;
		return nhash;
	}

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
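
/*
 * crypto_clone_ahash() yields a private copy of a tfm (sharing the
 * original for unkeyed algorithms, duplicating key state otherwise).
 * A hedged sketch of a caller:
 *
 *	nhash = crypto_clone_ahash(hash);
 *	if (IS_ERR(nhash))
 *		return PTR_ERR(nhash);
 *	...use nhash independently of hash...
 *	crypto_free_ahash(nhash);
 */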

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	if (alg->reqsize && alg->reqsize < alg->halg.statesize)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	if (!alg->setkey)
		alg->setkey = ahash_nosetkey;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
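
/*
 * A driver-side sketch of registration (the my_* names and MY_DIGEST_SIZE
 * are placeholders, not real symbols).  Note that ahash_prepare_alg()
 * above requires a non-zero statesize and rejects a non-zero reqsize
 * smaller than it.
 *
 *	static struct ahash_alg my_alg = {
 *		.init	= my_init,
 *		.update	= my_update,
 *		.final	= my_final,
 *		.digest	= my_digest,
 *		.halg	= {
 *			.digestsize	= MY_DIGEST_SIZE,
 *			.statesize	= sizeof(struct my_state),
 *			.base		= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_alg);
 */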

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_request_free(struct ahash_request *req)
{
	struct ahash_request *tmp;
	struct ahash_request *r2;

	if (unlikely(!req))
		return;

	list_for_each_entry_safe(r2, tmp, &req->base.list, base.list)
		kfree_sensitive(r2);

	kfree_sensitive(req);
}
EXPORT_SYMBOL_GPL(ahash_request_free);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");