// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "hash.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_local_page(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_local(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
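
/*
 * Illustrative use of the hash walk helpers above (a sketch; see
 * shash_ahash_update() in shash.c for a real caller): an implementation
 * walks the request's scatterlist in page-sized, alignment-respecting
 * chunks, feeds each mapped chunk to its update routine, and passes that
 * routine's return value back as the error argument of
 * crypto_hash_walk_done():
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 */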

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
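
/*
 * When the caller's result buffer is misaligned (or the default finup
 * path needs to carry state), a bounce subrequest is built behind
 * req->priv.  Its single allocation looks roughly like this
 * (illustrative layout of what ahash_save_req() below sets up):
 *
 *	struct ahash_request	subreq
 *	u8			__ctx[reqsize]		(ctx-aligned)
 *	u8			result[digestsize]	(aligned to alignmask + 1)
 *
 * ahash_restore_req() copies result[] back into the original
 * req->result on success and frees the whole allocation with
 * kfree_sensitive().
 */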

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
			  bool has_state)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request *subreq;
	unsigned int subreq_size;
	unsigned int reqsize;
	u8 *result;
	gfp_t gfp;
	u32 flags;

	subreq_size = sizeof(*subreq);
	reqsize = crypto_ahash_reqsize(tfm);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
	subreq_size += reqsize;
	subreq_size += ds;
	subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	subreq = kmalloc(subreq_size, gfp);
	if (!subreq)
		return -ENOMEM;

	ahash_request_set_tfm(subreq, tfm);
	ahash_request_set_callback(subreq, flags, cplt, req);

	result = (u8 *)(subreq + 1) + reqsize;
	result = PTR_ALIGN(result, alignmask + 1);

	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);

	if (has_state) {
		void *state;

		state = kmalloc(crypto_ahash_statesize(tfm), gfp);
		if (!state) {
			kfree(subreq);
			return -ENOMEM;
		}

		crypto_ahash_export(req, state);
		crypto_ahash_import(subreq, state);
		kfree_sensitive(state);
	}

	req->priv = subreq;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (!err)
		memcpy(req->result, subreq->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	req->priv = NULL;

	kfree_sensitive(subreq);
}

static void ahash_op_unaligned_done(void *data, int err)
{
	struct ahash_request *areq = data;

	if (err == -EINPROGRESS)
		goto out;

	/* First copy the digest from the subrequest back into req->result. */
	ahash_restore_req(areq, err);

out:
	/* Complete the ORIGINAL request. */
	ahash_request_complete(areq, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *),
			      bool has_state)
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
	if (err)
		return err;

	err = op(req->priv);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *),
			   bool has_state)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)req->result & alignmask)
		err = ahash_op_unaligned(req, op, has_state);
	else
		err = op(req);

	return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		atomic64_inc(&hash_get_stat(alg)->hash_cnt);

	return crypto_ahash_op(req, tfm->final, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_hash *istat = hash_get_stat(alg);

		atomic64_inc(&istat->hash_cnt);
		atomic64_add(req->nbytes, &istat->hash_tlen);
	}

	return crypto_ahash_op(req, tfm->finup, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_hash *istat = hash_get_stat(alg);

		atomic64_inc(&istat->hash_cnt);
		atomic64_add(req->nbytes, &istat->hash_tlen);
	}

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return crypto_hash_errstat(alg, -ENOKEY);

	return crypto_ahash_op(req, tfm->digest, false);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
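
/*
 * Default ->finup implementation for algorithms that only provide
 * ->update and ->final: ahash_def_finup() saves the request into a
 * subrequest, runs ->update on it, and then chains through
 * ahash_def_finup_finish1() into ->final.  If either step completes
 * asynchronously, ahash_def_finup_done1() and ahash_def_finup_done2()
 * pick up the chain from the completion callback; in all cases the
 * original request is restored and completed once ->final has finished.
 */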

static void ahash_def_finup_done2(void *data, int err)
{
	struct ahash_request *areq = data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	struct ahash_request *subreq = req->priv;

	if (err)
		goto out;

	subreq->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(subreq);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
	struct ahash_request *areq = data;
	struct ahash_request *subreq;

	if (err == -EINPROGRESS)
		goto out;

	subreq = areq->priv;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = ahash_def_finup_finish1(areq, err);
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	ahash_request_complete(areq, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1, true);
	if (err)
		return err;

	err = tfm->update(req->priv);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}
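
/*
 * Transform setup: crypto_ahash_init_tfm() below copies the algorithm's
 * operations into the tfm.  Algorithms whose cra_type is not
 * crypto_ahash_type are synchronous shash implementations; those are
 * wrapped via crypto_init_shash_ops_async() so that callers see a
 * uniform ahash interface either way.
 */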

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	crypto_ahash_set_statesize(hash, alg->halg.statesize);

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static int __maybe_unused crypto_ahash_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	return crypto_hash_report_stat(skb, alg, "ahash");
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_ahash_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
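
/*
 * Minimal consumer sketch (illustrative only; error handling elided,
 * and "sha256", data, len and digest are placeholders).  Waiting
 * synchronously with crypto_wait_req() is just one way of consuming the
 * completion callback:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */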

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->init = hash->init;
	nhash->update = hash->update;
	nhash->final = hash->final;
	nhash->finup = hash->finup;
	nhash->digest = hash->digest;
	nhash->export = hash->export;
	nhash->import = hash->import;
	nhash->setkey = hash->setkey;
	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_clone_shash_ops_async(nhash, hash);

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");