// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the implementation of the ahash (asynchronous hash) API.  It differs
 * from shash (synchronous hash) in that ahash supports asynchronous operations,
 * and it hashes data from scatterlists instead of virtually addressed buffers.
 *
 * The ahash API provides access to both ahash and shash algorithms.  The shash
 * API only provides access to shash algorithms.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */
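
/*
 * Minimal usage sketch (illustrative only; error handling elided, and
 * "sha256", data, len and digest are placeholders).  A caller hashing a
 * linear buffer through the ahash API typically does:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */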

#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e

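/*
 * State for walking a scatterlist one mapped page fragment at a time.
 * @data points at the currently mapped fragment, @entrylen is what is
 * left of the current scatterlist entry and @total is what is left of
 * the whole request.
 */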
struct crypto_hash_walk {
	const char *data;

	unsigned int offset;
	unsigned int flags;

	struct page *pg;
	unsigned int entrylen;

	unsigned int total;
	struct scatterlist *sg;
};

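/*
 * State saved around an asynchronous operation while this layer borrows
 * the request's completion callback.  For virtual-address updates that
 * must be bounced through a page, @page holds the bounce buffer, @src
 * the caller's buffer, and @offset/@nbytes track copy progress.
 */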
struct ahash_save_req_state {
	struct ahash_request *req0;
	crypto_completion_t compl;
	void *data;
	struct scatterlist sg;
	const u8 *src;
	u8 *page;
	unsigned int offset;
	unsigned int nbytes;
	bool update;
};

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
static void ahash_restore_req(struct ahash_request *req);
static void ahash_def_finup_done1(void *data, int err);
static int ahash_def_finup_finish1(struct ahash_request *req, int err);
static int ahash_def_finup(struct ahash_request *req);

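/* Map the current page fragment and return its length in bytes. */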
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;
	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

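/*
 * Begin walking the request's data.  Returns the length of the first
 * fragment, or zero for an empty request.  For a virtual-address
 * request the whole buffer is handed over in a single step.
 */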
static int crypto_hash_walk_first(struct ahash_request *req,
				  struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;
	walk->entrylen = 0;

	if (!walk->total)
		return 0;

	walk->flags = req->base.flags;

	if (ahash_request_isvirt(req)) {
		walk->data = req->svirt;
		walk->total = 0;
		return req->nbytes;
	}

	walk->sg = req->src;

	return hash_walk_new_entry(walk);
}

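/*
 * Finish the current fragment and advance to the next one.  @err is the
 * result of processing the current fragment; it is returned as-is on
 * failure.  Returns the length of the next fragment, or zero once the
 * walk is complete.
 */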
static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
		return err;

	walk->data -= walk->offset;

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}

static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
	return !(walk->entrylen | walk->total);
}

/*
 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
	return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
}

static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
						    struct crypto_ahash *tfm)
{
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = ahash_to_shash(tfm);
	return desc;
}

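/* Perform an ahash update on scatterlist data using an shash algorithm. */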
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);

int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	nbytes = crypto_hash_walk_first(req, &walk);
	if (!nbytes)
		return crypto_shash_final(desc, req->result);

	do {
		nbytes = crypto_hash_walk_last(&walk) ?
			 crypto_shash_finup(desc, walk.data, nbytes,
					    req->result) :
			 crypto_shash_update(desc, walk.data, nbytes);
		nbytes = crypto_hash_walk_done(&walk, nbytes);
	} while (nbytes > 0);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

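/*
 * Digest in one pass where possible: virtually addressed data, data in
 * a single scatterlist entry on !HIGHMEM kernels, or data within one
 * page on HIGHMEM kernels is passed straight to crypto_shash_digest().
 * Anything else falls back to an init + finup walk.
 */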
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
	unsigned int nbytes = req->nbytes;
	struct scatterlist *sg;
	unsigned int offset;
	struct page *page;
	const u8 *data;
	int err;

	data = req->svirt;
	if (!nbytes || ahash_request_isvirt(req))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	sg = req->src;
	if (nbytes > sg->length)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	page = sg_page(sg);
	offset = sg->offset;
	data = lowmem_page_address(page) + offset;
	if (!IS_ENABLED(CONFIG_HIGHMEM))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	page = nth_page(page, offset >> PAGE_SHIFT);
	offset = offset_in_page(offset);

	if (nbytes > (unsigned int)PAGE_SIZE - offset)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	data = kmap_local_page(page);
	err = crypto_shash_digest(desc, data + offset, nbytes,
				  req->result);
	kunmap_local(data);
	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);

static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(*ctx);
}

static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *shash;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	shash = crypto_create_tfm(calg, &crypto_shash_type);
	if (IS_ERR(shash)) {
		crypto_mod_put(calg);
		return PTR_ERR(shash);
	}

	crt->using_shash = true;
	*ctx = shash;
	tfm->exit = crypto_exit_ahash_using_shash;

	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
				    CRYPTO_TFM_NEED_KEY);
	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

	return 0;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
	if (alg->setkey != ahash_nosetkey &&
	    !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

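/*
 * Set the key on the tfm and, for an async algorithm, on its synchronous
 * fallback as well, keeping CRYPTO_TFM_NEED_KEY consistent with the
 * outcome.
 */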
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	if (likely(tfm->using_shash)) {
		struct crypto_shash *shash = ahash_to_shash(tfm);
		int err;

		err = crypto_shash_setkey(shash, key, keylen);
		if (unlikely(err)) {
			crypto_ahash_set_flags(tfm,
					       crypto_shash_get_flags(shash) &
					       CRYPTO_TFM_NEED_KEY);
			return err;
		}
	} else {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		int err;

		err = alg->setkey(tfm, key, keylen);
		if (!err && ahash_is_async(tfm))
			err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
						  key, keylen);
		if (unlikely(err)) {
			ahash_set_needkey(tfm, alg);
			return err;
		}
	}
	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

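/*
 * Feed the remainder of a virtual-address update through the bounce
 * page, one PAGE_SIZE chunk at a time.  Once everything has been copied
 * (or an error occurred) the request is pointed back at the caller's
 * buffer.
 */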
static int ahash_reqchain_virt(struct ahash_save_req_state *state,
			       int err, u32 mask)
{
	struct ahash_request *req = state->req0;
	struct crypto_ahash *tfm;

	tfm = crypto_ahash_reqtfm(req);

	for (;;) {
		unsigned len = state->nbytes;

		if (!state->offset)
			break;

		if (state->offset == len || err) {
			u8 *result = req->result;

			ahash_request_set_virt(req, state->src, result, len);
			state->offset = 0;
			break;
		}

		len -= state->offset;

		len = min(PAGE_SIZE, len);
		memcpy(state->page, state->src + state->offset, len);
		state->offset += len;
		req->nbytes = len;

		err = crypto_ahash_alg(tfm)->update(req);
		if (err == -EINPROGRESS) {
			if (state->offset < state->nbytes)
				err = -EBUSY;
			break;
		}

		if (err == -EBUSY)
			break;
	}

	return err;
}

static int ahash_reqchain_finish(struct ahash_request *req0,
				 struct ahash_save_req_state *state,
				 int err, u32 mask)
{
	u8 *page;

	err = ahash_reqchain_virt(state, err, mask);
	if (err == -EINPROGRESS || err == -EBUSY)
		goto out;

	page = state->page;
	if (page) {
		memset(page, 0, PAGE_SIZE);
		free_page((unsigned long)page);
	}
	ahash_restore_req(req0);

out:
	return err;
}

static void ahash_reqchain_done(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (state->offset < state->nbytes)
			return;
		goto notify;
	}

	err = ahash_reqchain_finish(state->req0, state, err,
				    CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}

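/*
 * Invoke @op, bouncing the data of a virtual-address update through a
 * freshly allocated page for drivers that only handle scatterlists.
 * Non-update operations, SG-based requests and drivers with native
 * virtual-address support call @op directly.
 */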
static int ahash_do_req_chain(struct ahash_request *req,
			      int (*op)(struct ahash_request *req))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	bool update = op == crypto_ahash_alg(tfm)->update;
	struct ahash_save_req_state *state;
	struct ahash_save_req_state state0;
	u8 *page = NULL;
	int err;

	if (crypto_ahash_req_virt(tfm) ||
	    !update || !ahash_request_isvirt(req))
		return op(req);

	if (update && ahash_request_isvirt(req)) {
		page = (void *)__get_free_page(GFP_ATOMIC);
		err = -ENOMEM;
		if (!page)
			goto out;
	}

	state = &state0;
	if (ahash_is_async(tfm)) {
		err = ahash_save_req(req, ahash_reqchain_done);
		if (err)
			goto out_free_page;

		state = req->base.data;
	}

	state->update = update;
	state->page = page;
	state->offset = 0;
	state->nbytes = 0;

	if (page)
		sg_init_one(&state->sg, page, PAGE_SIZE);

	if (update && ahash_request_isvirt(req) && req->nbytes) {
		unsigned len = req->nbytes;
		u8 *result = req->result;

		state->src = req->svirt;
		state->nbytes = len;

		len = min(PAGE_SIZE, len);

		memcpy(page, req->svirt, len);
		state->offset = len;

		ahash_request_set_crypt(req, &state->sg, result, len);
	}

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		if (state->offset < state->nbytes)
			err = -EBUSY;
		return err;
	}

	return ahash_reqchain_finish(req, state, err, ~0);

out_free_page:
	free_page((unsigned long)page);

out:
	return err;
}

int crypto_ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_init(prepare_shash_desc(req, tfm));
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
		return -EAGAIN;
	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);

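/*
 * For an async tfm, save the caller's completion callback and data and
 * substitute @cplt so this layer regains control when the operation
 * finishes.  Undone by ahash_restore_req().
 */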
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ahash_save_req_state *state;

	if (!ahash_is_async(tfm))
		return 0;

	state = kmalloc(sizeof(*state), GFP_ATOMIC);
	if (!state)
		return -ENOMEM;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_save_req_state *state;
	struct crypto_ahash *tfm;

	tfm = crypto_ahash_reqtfm(req);
	if (!ahash_is_async(tfm))
		return;

	state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
	kfree(state);
}

int crypto_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return shash_ahash_update(req, ahash_request_ctx(req));
	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
		return -EAGAIN;
	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_final(ahash_request_ctx(req), req->result);
	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
		return -EAGAIN;
	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return shash_ahash_finup(req, ahash_request_ctx(req));
	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
		return -EAGAIN;
	if (!crypto_ahash_alg(tfm)->finup ||
	    (!crypto_ahash_req_virt(tfm) && ahash_request_isvirt(req)))
		return ahash_def_finup(req);
	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

static int ahash_def_digest_finish(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done1;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);

out:
	ahash_restore_req(req);
	return err;
}

static void ahash_def_digest_done(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_digest_finish(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

static int ahash_def_digest(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_digest_done);
	if (err)
		return err;

	err = crypto_ahash_init(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_digest_finish(req, err);
}

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
		return -EAGAIN;
	if (!crypto_ahash_req_virt(tfm) && ahash_request_isvirt(req))
		return ahash_def_digest(req);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	struct ahash_request *areq = state->req0;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq);
	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

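/*
 * Default finup for drivers that lack a usable finup: run update, then
 * final, chaining through the _done completion handlers whenever a step
 * goes asynchronous.
 */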
static int ahash_def_finup(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

int crypto_ahash_export_core(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_export_core(ahash_request_ctx(req), out);
	return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export_core);

int crypto_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_export(ahash_request_ctx(req), out);
	return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

int crypto_ahash_import_core(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_import_core(prepare_shash_desc(req, tfm),
						in);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import_core);

int crypto_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return crypto_ahash_import_core(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	if (alg->exit_tfm)
		alg->exit_tfm(hash);
	else if (tfm->__crt_alg->cra_exit)
		tfm->__crt_alg->cra_exit(tfm);

	if (ahash_is_async(hash))
		crypto_free_ahash(crypto_ahash_fb(hash));
}

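/*
 * Initialise an ahash tfm: shash algorithms get the shash wrapper, and
 * async algorithms additionally allocate a synchronous fallback tfm of
 * the same algorithm for use with virtual addresses and on-stack
 * requests.
 */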
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);
	struct crypto_ahash *fb = NULL;
	int err;

	crypto_ahash_set_statesize(hash, alg->halg.statesize);
	crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));

	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
		return crypto_init_ahash_using_shash(tfm);

	if (ahash_is_async(hash)) {
		fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
					0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(fb))
			return PTR_ERR(fb);

		tfm->fb = crypto_ahash_tfm(fb);
	}

	ahash_set_needkey(hash, alg);

	tfm->exit = crypto_ahash_exit_tfm;

	if (alg->init_tfm)
		err = alg->init_tfm(hash);
	else if (tfm->__crt_alg->cra_init)
		err = tfm->__crt_alg->cra_init(tfm);
	else
		return 0;

	if (err)
		goto out_free_sync_hash;

	if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
				     MAX_SYNC_HASH_REQSIZE)
		goto out_exit_tfm;

	return 0;

out_exit_tfm:
	if (alg->exit_tfm)
		alg->exit_tfm(hash);
	else if (tfm->__crt_alg->cra_exit)
		tfm->__crt_alg->cra_exit(tfm);
	err = -EINVAL;
out_free_sync_hash:
	crypto_free_ahash(fb);
	return err;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_shash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
	.algsize = offsetof(struct ahash_alg, halg.base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type == &crypto_shash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *fb = NULL;
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (likely(hash->using_shash)) {
		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
		struct crypto_shash *shash;

		shash = crypto_clone_shash(ahash_to_shash(hash));
		if (IS_ERR(shash)) {
			err = PTR_ERR(shash);
			goto out_free_nhash;
		}
		crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash;
		nhash->using_shash = true;
		*nctx = shash;
		return nhash;
	}

	if (ahash_is_async(hash)) {
		fb = crypto_clone_ahash(crypto_ahash_fb(hash));
		err = PTR_ERR(fb);
		if (IS_ERR(fb))
			goto out_free_nhash;

		crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb);
	}

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_fb;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_fb;

	crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm;

	return nhash;

out_free_fb:
	crypto_free_ahash(fb);
out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize)
		return -EINVAL;

	if (!(base->cra_flags & CRYPTO_ALG_ASYNC) &&
	    base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	if (!alg->setkey)
		alg->setkey = ahash_nosetkey;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_request_free(struct ahash_request *req)
{
	if (unlikely(!req))
		return;

	if (!ahash_req_on_stack(req)) {
		kfree(req);
		return;
	}

	ahash_request_zero(req);
}
EXPORT_SYMBOL_GPL(ahash_request_free);

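/*
 * One-shot digest of a virtually addressed buffer, using the tfm's
 * synchronous fallback and an on-stack request.  Illustrative call
 * (out sized to the digest length):
 *
 *	err = crypto_hash_digest(tfm, data, len, out);
 */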
int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
		       unsigned int len, u8 *out)
{
	HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm));
	int err;

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_virt(req, data, out, len);
	err = crypto_ahash_digest(req);

	ahash_request_zero(req);

	return err;
}
EXPORT_SYMBOL_GPL(crypto_hash_digest);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");