// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the implementation of the ahash (asynchronous hash) API.  It differs
 * from shash (synchronous hash) in that ahash supports asynchronous
 * operations, and it hashes data from scatterlists as well as from virtually
 * addressed buffers.
 *
 * The ahash API provides access to both ahash and shash algorithms.  The shash
 * API only provides access to shash algorithms.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e

struct crypto_hash_walk {
	const char *data;

	unsigned int offset;
	unsigned int flags;

	struct page *pg;
	unsigned int entrylen;

	unsigned int total;
	struct scatterlist *sg;
};

struct ahash_save_req_state {
	struct list_head head;
	struct ahash_request *req0;
	struct ahash_request *cur;
	int (*op)(struct ahash_request *req);
	crypto_completion_t compl;
	void *data;
	struct scatterlist sg;
	const u8 *src;
	u8 *page;
	unsigned int offset;
	unsigned int nbytes;
};

static void ahash_reqchain_done(void *data, int err);
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
static void ahash_restore_req(struct ahash_request *req);
static void ahash_def_finup_done1(void *data, int err);
static int ahash_def_finup_finish1(struct ahash_request *req, int err);
static int ahash_def_finup(struct ahash_request *req);

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;
	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

static int crypto_hash_walk_first(struct ahash_request *req,
				  struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;
	walk->entrylen = 0;

	if (!walk->total)
		return 0;

	walk->flags = req->base.flags;

	if (ahash_request_isvirt(req)) {
		walk->data = req->svirt;
		walk->total = 0;
		return req->nbytes;
	}

	walk->sg = req->src;

	return hash_walk_new_entry(walk);
}

static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
		return err;

	walk->data -= walk->offset;

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
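
/*
 * Illustrative sketch (not a separate API): a consumer drives the walk by
 * calling crypto_hash_walk_first() once, then feeding each mapped chunk
 * back through crypto_hash_walk_done() until it returns 0 or an error:
 *
 *	for (n = crypto_hash_walk_first(req, &walk); n > 0;
 *	     n = crypto_hash_walk_done(&walk, n))
 *		n = crypto_shash_update(desc, walk.data, n);
 *
 * This is exactly the loop used by shash_ahash_update() below.  For
 * virtually addressed requests the first call returns the whole length in
 * one go and crypto_hash_walk_done() does nothing beyond propagating err.
 */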

static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
	return !(walk->entrylen | walk->total);
}

/*
 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
	return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
}

static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
						    struct crypto_ahash *tfm)
{
	struct shash_desc *desc = ahash_request_ctx(req);

	desc->tfm = ahash_to_shash(tfm);
	return desc;
}

int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = crypto_shash_update(desc, walk.data, nbytes);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);

int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
	struct crypto_hash_walk walk;
	int nbytes;

	nbytes = crypto_hash_walk_first(req, &walk);
	if (!nbytes)
		return crypto_shash_final(desc, req->result);

	do {
		nbytes = crypto_hash_walk_last(&walk) ?
			 crypto_shash_finup(desc, walk.data, nbytes,
					    req->result) :
			 crypto_shash_update(desc, walk.data, nbytes);
		nbytes = crypto_hash_walk_done(&walk, nbytes);
	} while (nbytes > 0);

	return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
	unsigned int nbytes = req->nbytes;
	struct scatterlist *sg;
	unsigned int offset;
	struct page *page;
	const u8 *data;
	int err;

	data = req->svirt;
	if (!nbytes || ahash_request_isvirt(req))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	sg = req->src;
	if (nbytes > sg->length)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	page = sg_page(sg);
	offset = sg->offset;
	data = lowmem_page_address(page) + offset;
	if (!IS_ENABLED(CONFIG_HIGHMEM))
		return crypto_shash_digest(desc, data, nbytes, req->result);

	page = nth_page(page, offset >> PAGE_SHIFT);
	offset = offset_in_page(offset);

	if (nbytes > (unsigned int)PAGE_SIZE - offset)
		return crypto_shash_init(desc) ?:
		       shash_ahash_finup(req, desc);

	data = kmap_local_page(page);
	err = crypto_shash_digest(desc, data + offset, nbytes,
				  req->result);
	kunmap_local(data);
	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);

static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(*ctx);
}

static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *shash;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	shash = crypto_create_tfm(calg, &crypto_shash_type);
	if (IS_ERR(shash)) {
		crypto_mod_put(calg);
		return PTR_ERR(shash);
	}

	crt->using_shash = true;
	*ctx = shash;
	tfm->exit = crypto_exit_ahash_using_shash;

	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
				    CRYPTO_TFM_NEED_KEY);
	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

	return 0;
}
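
/*
 * Layout note: when wrapping an shash algorithm, the ahash tfm context
 * holds a single pointer to the underlying shash tfm (see ahash_to_shash()
 * above), and each request's context is sized for a struct shash_desc plus
 * the shash descriptor state, per the crt->reqsize computation above.
 */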

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
	if (alg->setkey != ahash_nosetkey &&
	    !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	if (likely(tfm->using_shash)) {
		struct crypto_shash *shash = ahash_to_shash(tfm);
		int err;

		err = crypto_shash_setkey(shash, key, keylen);
		if (unlikely(err)) {
			crypto_ahash_set_flags(tfm,
					       crypto_shash_get_flags(shash) &
					       CRYPTO_TFM_NEED_KEY);
			return err;
		}
	} else {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		int err;

		err = alg->setkey(tfm, key, keylen);
		if (unlikely(err)) {
			ahash_set_needkey(tfm, alg);
			return err;
		}
	}
	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static bool ahash_request_hasvirt(struct ahash_request *req)
{
	struct ahash_request *r2;

	if (ahash_request_isvirt(req))
		return true;

	list_for_each_entry(r2, &req->base.list, base.list)
		if (ahash_request_isvirt(r2))
			return true;

	return false;
}

static int ahash_reqchain_virt(struct ahash_save_req_state *state,
			       int err, u32 mask)
{
	struct ahash_request *req = state->cur;

	for (;;) {
		unsigned len = state->nbytes;

		req->base.err = err;

		if (!state->offset)
			break;

		if (state->offset == len || err) {
			u8 *result = req->result;

			ahash_request_set_virt(req, state->src, result, len);
			state->offset = 0;
			break;
		}

		len -= state->offset;

		len = min(PAGE_SIZE, len);
		memcpy(state->page, state->src + state->offset, len);
		state->offset += len;
		req->nbytes = len;

		err = state->op(req);
		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
				err = -EBUSY;
			break;
		}

		if (err == -EBUSY)
			break;
	}

	return err;
}

static int ahash_reqchain_finish(struct ahash_request *req0,
				 struct ahash_save_req_state *state,
				 int err, u32 mask)
{
	struct ahash_request *req = state->cur;
	struct crypto_ahash *tfm;
	struct ahash_request *n;
	bool update;
	u8 *page;

	err = ahash_reqchain_virt(state, err, mask);
	if (err == -EINPROGRESS || err == -EBUSY)
		goto out;

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	tfm = crypto_ahash_reqtfm(req);
	update = state->op == crypto_ahash_alg(tfm)->update;

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = ahash_reqchain_done;
		req->base.data = state;
		state->cur = req;

		if (update && ahash_request_isvirt(req) && req->nbytes) {
			unsigned len = req->nbytes;
			u8 *result = req->result;

			state->src = req->svirt;
			state->nbytes = len;

			len = min(PAGE_SIZE, len);

			memcpy(state->page, req->svirt, len);
			state->offset = len;

			ahash_request_set_crypt(req, &state->sg, result, len);
		}

		err = state->op(req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		err = ahash_reqchain_virt(state, err, mask);
		if (err == -EINPROGRESS || err == -EBUSY)
			goto out;

		list_add_tail(&req->base.list, &req0->base.list);
	}

	page = state->page;
	if (page) {
		memset(page, 0, PAGE_SIZE);
		free_page((unsigned long)page);
	}
	ahash_restore_req(req0);

out:
	return err;
}
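
/*
 * Bounce-buffering note: virtually addressed update data is fed to drivers
 * that only accept scatterlists by copying at most PAGE_SIZE bytes into
 * state->page and submitting it via the single-entry state->sg.
 * ahash_reqchain_virt() then copies in the next chunk until state->offset
 * catches up with state->nbytes, at which point the request is restored to
 * its original virtual-address form.
 */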

static void ahash_reqchain_done(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	crypto_completion_t compl = state->compl;

	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head) || state->offset < state->nbytes)
			return;
		goto notify;
	}

	err = ahash_reqchain_finish(state->req0, state, err,
				    CRYPTO_TFM_REQ_MAY_BACKLOG);
	if (err == -EBUSY)
		return;

notify:
	compl(data, err);
}
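
/*
 * ahash_do_req_chain() dispatches an operation for a possibly chained
 * request.  If the driver handles chaining natively, or the request is
 * unchained and needs no bounce buffering (i.e. it is not a
 * virtual-address update), the op is invoked directly.  Otherwise the
 * chain is walked here one request at a time, with asynchronous
 * completions funnelled through ahash_reqchain_done() and
 * ahash_reqchain_finish() above.
 */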

static int ahash_do_req_chain(struct ahash_request *req,
			      int (*op)(struct ahash_request *req))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	bool update = op == crypto_ahash_alg(tfm)->update;
	struct ahash_save_req_state *state;
	struct ahash_save_req_state state0;
	struct ahash_request *r2;
	u8 *page = NULL;
	int err;

	if (crypto_ahash_req_chain(tfm) ||
	    (!ahash_request_chained(req) &&
	     (!update || !ahash_request_isvirt(req))))
		return op(req);

	if (update && ahash_request_hasvirt(req)) {
		gfp_t gfp;
		u32 flags;

		flags = ahash_request_flags(req);
		gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
		page = (void *)__get_free_page(gfp);
		err = -ENOMEM;
		if (!page)
			goto out_set_chain;
	}

	state = &state0;
	if (ahash_is_async(tfm)) {
		err = ahash_save_req(req, ahash_reqchain_done);
		if (err)
			goto out_free_page;

		state = req->base.data;
	}

	state->op = op;
	state->cur = req;
	state->page = page;
	state->offset = 0;
	state->nbytes = 0;
	INIT_LIST_HEAD(&state->head);
	list_splice_init(&req->base.list, &state->head);

	if (page)
		sg_init_one(&state->sg, page, PAGE_SIZE);

	if (update && ahash_request_isvirt(req) && req->nbytes) {
		unsigned len = req->nbytes;
		u8 *result = req->result;

		state->src = req->svirt;
		state->nbytes = len;

		len = min(PAGE_SIZE, len);

		memcpy(page, req->svirt, len);
		state->offset = len;

		ahash_request_set_crypt(req, &state->sg, result, len);
	}

	err = op(req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;

	return ahash_reqchain_finish(req, state, err, ~0);

out_free_page:
	free_page((unsigned long)page);

out_set_chain:
	req->base.err = err;
	list_for_each_entry(r2, &req->base.list, base.list)
		r2->base.err = err;

	return err;
}

int crypto_ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = crypto_shash_init(prepare_shash_desc(req, tfm));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = prepare_shash_desc(r2, tfm);
			r2->base.err = crypto_shash_init(desc);
		}

		return err;
	}

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);
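
/*
 * For asynchronous tfms the framework needs somewhere to stash the
 * caller's completion callback while it substitutes its own:
 * ahash_save_req() hijacks req->base.complete/req->base.data, and
 * ahash_restore_req() puts them back and frees the saved state.
 * Synchronous tfms skip this entirely.
 */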

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ahash_save_req_state *state;
	gfp_t gfp;
	u32 flags;

	if (!ahash_is_async(tfm))
		return 0;

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	state = kmalloc(sizeof(*state), gfp);
	if (!state)
		return -ENOMEM;

	state->compl = req->base.complete;
	state->data = req->base.data;
	req->base.complete = cplt;
	req->base.data = state;
	state->req0 = req;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_save_req_state *state;
	struct crypto_ahash *tfm;

	tfm = crypto_ahash_reqtfm(req);
	if (!ahash_is_async(tfm))
		return;

	state = req->base.data;

	req->base.complete = state->compl;
	req->base.data = state->data;
	kfree(state);
}

int crypto_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = shash_ahash_update(req, ahash_request_ctx(req));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = ahash_request_ctx(r2);
			r2->base.err = shash_ahash_update(r2, desc);
		}

		return err;
	}

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = crypto_shash_final(ahash_request_ctx(req), req->result);
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = ahash_request_ctx(r2);
			r2->base.err = crypto_shash_final(desc, r2->result);
		}

		return err;
	}

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = shash_ahash_finup(req, ahash_request_ctx(req));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = ahash_request_ctx(r2);
			r2->base.err = shash_ahash_finup(r2, desc);
		}

		return err;
	}

	if (!crypto_ahash_alg(tfm)->finup ||
	    (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
		return ahash_def_finup(req);

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

static int ahash_def_digest_finish(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done1;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);

out:
	ahash_restore_req(req);
	return err;
}
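
/*
 * Default digest/finup machinery: when a driver lacks ->finup() or cannot
 * consume virtual addresses directly, digest is decomposed into
 * init -> update -> final (and finup into update -> final).  Each
 * asynchronous step re-enters through one of the *_done() completion
 * handlers, which clear CRYPTO_TFM_REQ_MAY_SLEEP since they may run in
 * atomic context.
 */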

static void ahash_def_digest_done(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_digest_finish(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

static int ahash_def_digest(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_digest_done);
	if (err)
		return err;

	err = crypto_ahash_init(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_digest_finish(req, err);
}

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		struct ahash_request *r2;
		int err;

		err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
		req->base.err = err;

		list_for_each_entry(r2, &req->base.list, base.list) {
			struct shash_desc *desc;

			desc = prepare_shash_desc(r2, tfm);
			r2->base.err = shash_ahash_digest(r2, desc);
		}

		return err;
	}

	if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
		return ahash_def_digest(req);

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_save_req_state *state = data;
	struct ahash_request *areq = state->req0;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq);
	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm;

	if (err)
		goto out;

	tfm = crypto_ahash_reqtfm(req);
	if (ahash_is_async(tfm))
		req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_save_req_state *state0 = data;
	struct ahash_save_req_state state;
	struct ahash_request *areq;

	state = *state0;
	areq = state.req0;
	if (err == -EINPROGRESS)
		goto out;

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	state.compl(state.data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = crypto_ahash_update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

int crypto_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_export(ahash_request_ctx(req), out);
	return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

int crypto_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);
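
/*
 * Minimal usage sketch for export/import (req1/req2 are illustrative names
 * for two requests on tfms of the same algorithm): partial hash state can
 * be carried from one request to another:
 *
 *	u8 state[HASH_MAX_STATESIZE];
 *
 *	err = crypto_ahash_update(req1);
 *	...
 *	err = crypto_ahash_export(req1, state);
 *	err = crypto_ahash_import(req2, state);
 *	err = crypto_ahash_finup(req2);
 *
 * The state buffer must be at least crypto_ahash_statesize() bytes.
 */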

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	crypto_ahash_set_statesize(hash, alg->halg.statesize);
	crypto_ahash_set_reqsize(hash, alg->reqsize);

	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
		return crypto_init_ahash_using_shash(tfm);

	ahash_set_needkey(hash, alg);

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_shash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type == &crypto_shash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}
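
/*
 * Cloning: a tfm for a keyless algorithm is shared by taking a reference,
 * since it carries no per-user key material; keyed tfms must be
 * deep-copied (via crypto_clone_shash() or the algorithm's ->clone_tfm())
 * so the clone gets its own copy of the key-derived state.
 */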

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (likely(hash->using_shash)) {
		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
		struct crypto_shash *shash;

		shash = crypto_clone_shash(ahash_to_shash(hash));
		if (IS_ERR(shash)) {
			err = PTR_ERR(shash);
			goto out_free_nhash;
		}
		nhash->using_shash = true;
		*nctx = shash;
		return nhash;
	}

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	if (alg->reqsize && alg->reqsize < alg->halg.statesize)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	if (!alg->setkey)
		alg->setkey = ahash_nosetkey;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
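
/*
 * Registration sketch for a driver (hypothetical names, shown only to
 * illustrate the required fields; real drivers live under drivers/crypto/):
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_init,
 *		.update	= my_update,
 *		.final	= my_final,
 *		.digest	= my_digest,
 *		.halg	= {
 *			.digestsize	= SHA256_DIGEST_SIZE,
 *			.statesize	= sizeof(struct my_export_state),
 *			.base		= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 *
 * Note that ahash_prepare_alg() rejects a zero statesize, so
 * ->halg.statesize must always be set.
 */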

void ahash_request_free(struct ahash_request *req)
{
	struct ahash_request *tmp;
	struct ahash_request *r2;

	if (unlikely(!req))
		return;

	list_for_each_entry_safe(r2, tmp, &req->base.list, base.list)
		kfree_sensitive(r2);

	kfree_sensitive(req);
}
EXPORT_SYMBOL_GPL(ahash_request_free);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");