// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

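/*
 * Illustrative sketch, not part of the original file: the usual way a
 * driver's ->update() implementation walks the request's scatterlist with
 * the helpers above.  crypto_hash_walk_first() maps the first chunk of
 * req->src; crypto_hash_walk_done() unmaps the current chunk, reports any
 * error, and advances to the next one.  example_process_chunk() is a
 * hypothetical hook (returning 0 or a -errno) standing in for the real
 * hashing work.
 */
static int example_process_chunk(void *ctx, const void *data,
				 unsigned int len);

static int example_ahash_update(struct ahash_request *req)
{
	void *ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int nbytes;

	/* Each iteration sees one mapped, alignment-trimmed chunk in walk.data. */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes))
		nbytes = example_process_chunk(ctx, walk.data, nbytes);

	return nbytes;
}
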
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

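/*
 * ahash_save_req()/ahash_restore_req() implement the fallback used when the
 * caller's result buffer cannot be handed to the driver directly (and, for
 * the default finup path, when the running state must be carried over).
 * ahash_save_req() allocates a subrequest whose result buffer is placed,
 * suitably aligned, behind the request context, optionally copies the hash
 * state across via export/import, and stashes the subrequest in req->priv.
 * On completion, ahash_restore_req() copies the digest back into the
 * caller's result buffer and frees the subrequest.
 */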
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
			  bool has_state)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request *subreq;
	unsigned int subreq_size;
	unsigned int reqsize;
	u8 *result;
	gfp_t gfp;
	u32 flags;

	subreq_size = sizeof(*subreq);
	reqsize = crypto_ahash_reqsize(tfm);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
	subreq_size += reqsize;
	subreq_size += ds;
	subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	subreq = kmalloc(subreq_size, gfp);
	if (!subreq)
		return -ENOMEM;

	ahash_request_set_tfm(subreq, tfm);
	ahash_request_set_callback(subreq, flags, cplt, req);

	result = (u8 *)(subreq + 1) + reqsize;
	result = PTR_ALIGN(result, alignmask + 1);

	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);

	if (has_state) {
		void *state;

		state = kmalloc(crypto_ahash_statesize(tfm), gfp);
		if (!state) {
			kfree(subreq);
			return -ENOMEM;
		}

		crypto_ahash_export(req, state);
		crypto_ahash_import(subreq, state);
		kfree_sensitive(state);
	}

	req->priv = subreq;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (!err)
		memcpy(req->result, subreq->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	req->priv = NULL;

	kfree_sensitive(subreq);
}

static void ahash_op_unaligned_done(void *data, int err)
{
	struct ahash_request *areq = data;

	if (err == -EINPROGRESS)
		goto out;

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

out:
	/* Complete the ORIGINAL request. */
	ahash_request_complete(areq, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *),
			      bool has_state)
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
	if (err)
		return err;

	err = op(req->priv);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *),
			   bool has_state)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)req->result & alignmask)
		err = ahash_op_unaligned(req, op, has_state);
	else
		err = op(req);

	return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		atomic64_inc(&hash_get_stat(alg)->hash_cnt);

	return crypto_ahash_op(req, tfm->final, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_hash *istat = hash_get_stat(alg);

		atomic64_inc(&istat->hash_cnt);
		atomic64_add(req->nbytes, &istat->hash_tlen);
	}

	return crypto_ahash_op(req, tfm->finup, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_hash *istat = hash_get_stat(alg);

		atomic64_inc(&istat->hash_cnt);
		atomic64_add(req->nbytes, &istat->hash_tlen);
	}

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return crypto_hash_errstat(alg, -ENOKEY);

	return crypto_ahash_op(req, tfm->digest, false);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

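/*
 * Illustrative sketch, not part of the original file: a typical synchronous
 * caller of the ahash API.  It allocates a transform by name, wires up a
 * request over a scatterlist, and uses crypto_wait_req() to block until an
 * asynchronous implementation signals completion through the request
 * callback.  The "sha256" algorithm name and the function name are only
 * examples.
 */
static int example_ahash_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a blocking wait. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
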
static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_request *areq = data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (err)
		goto out;

	subreq->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(subreq);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_request *areq = data;
	struct ahash_request *subreq;

	if (err == -EINPROGRESS)
		goto out;

	subreq = areq->priv;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	ahash_request_complete(areq, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1, true);
	if (err)
		return err;

	err = tfm->update(req->priv);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	crypto_ahash_set_statesize(hash, alg->halg.statesize);

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static int __maybe_unused crypto_ahash_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	return crypto_hash_report_stat(skb, alg, "ahash");
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_ahash_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->init = hash->init;
	nhash->update = hash->update;
	nhash->final = hash->final;
	nhash->finup = hash->finup;
	nhash->digest = hash->digest;
	nhash->export = hash->export;
	nhash->import = hash->import;
	nhash->setkey = hash->setkey;
	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_clone_shash_ops_async(nhash, hash);

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

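/*
 * Illustrative sketch, not part of the original file: the shape of an
 * ahash_alg a driver would pass to crypto_register_ahash().  The callback
 * names, driver name, sizes and context structure are hypothetical
 * placeholders; a real driver supplies its own.
 */
static int example_init(struct ahash_request *req);
static int example_update(struct ahash_request *req);
static int example_final(struct ahash_request *req);
static int example_digest(struct ahash_request *req);
static int example_export(struct ahash_request *req, void *out);
static int example_import(struct ahash_request *req, const void *in);

struct example_state {
	u32 h[8];
};

static struct ahash_alg example_alg = {
	.init	= example_init,
	.update	= example_update,
	.final	= example_final,
	/* .finup is optional; ahash_def_finup() is used when it is NULL. */
	.digest	= example_digest,
	.export	= example_export,
	.import	= example_import,
	.halg = {
		.digestsize	= 32,
		.statesize	= sizeof(struct example_state),
		.base = {
			.cra_name	 = "sha256",
			.cra_driver_name = "sha256-example",
			.cra_priority	 = 300,
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = 64,
			.cra_ctxsize	 = sizeof(struct example_state),
			.cra_module	 = THIS_MODULE,
		},
	},
};

/* Registration would then be a single crypto_register_ahash(&example_alg). */
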
void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");