// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	local_lock_t bh_lock;
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH to allow enqueueing from softinterrupt and
	 * dequeuing from kworker (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	struct skcipher_request req;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	void *data;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	struct aead_request req;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
		local_lock_init(&cpu_queue->bh_lock);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

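/*
 * Queue a request on the current CPU's queue and kick the per-CPU work item.
 * Every cryptd tfm context starts with a refcount_t; if it has been
 * initialised (i.e. the tfm was obtained through cryptd_alloc_aead()), an
 * extra reference is held while the request is outstanding so that
 * cryptd_free_aead() cannot release the tfm underneath the worker.
 */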
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	local_lock_nested_bh(&queue->cpu_queue->bh_lock);
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
	local_bh_enable();

	return err;
}

/*
 * Called in workqueue context: complete one queued request (its completion
 * callback performs the actual crypto operation) and reschedule the work
 * item if more requests are pending.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 */
	local_bh_disable();
	__local_lock_nested_bh(&cpu_queue->bh_lock);
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	__local_unlock_nested_bh(&cpu_queue->bh_lock);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);
	crypto_request_complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

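/*
 * Instance helpers: each cryptd instance stores a pointer to the global
 * request queue in its instance context, and cryptd_type_and_mask() computes
 * the type/mask used to look up the algorithm being wrapped.
 */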
static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

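/*
 * The skcipher request context embeds a second skcipher_request (rctx->req).
 * While the request sits on the queue it holds the caller's completion and
 * data (the parent request's callback is redirected to cryptd); once the
 * worker runs it becomes the subrequest handed to the synchronous child.
 * cryptd_skcipher_prepare() restores the caller's callback and sets up that
 * subrequest, or returns NULL when only a backlog -EINPROGRESS notification
 * needs to be delivered.
 */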
static struct skcipher_request *cryptd_skcipher_prepare(
	struct skcipher_request *req, int err)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *child;

	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	child = ctx->child;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	return subreq;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
				     crypto_completion_t complete)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	skcipher_request_complete(req, err);
	local_bh_enable();

	if (unlikely(err == -EINPROGRESS)) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_encrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
}

static void cryptd_skcipher_decrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_decrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
		     crypto_skcipher_reqsize(cipher));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg_common *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = alg->ivsize;
	inst->alg.chunksize = alg->chunksize;
	inst->alg.min_keysize = alg->min_keysize;
	inst->alg.max_keysize = alg->max_keysize;

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

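/*
 * ahash wrapper around a synchronous shash.  The shash descriptor lives in
 * the ahash request context (the request size includes the child's descsize),
 * so init/update/final/finup/digest as well as export/import can operate on
 * it directly from the workqueue without further allocations.
 */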
static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct ahash_instance *inst = ahash_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
				 struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_clone_shash(ctx->child);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	nctx->child = hash;
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

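/*
 * For hashes the caller's completion and data are parked in the request
 * context while the request waits on the queue; cryptd_hash_prepare()
 * restores them and returns the embedded descriptor, or NULL when only a
 * backlog -EINPROGRESS notification has to be passed on.
 */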
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	rctx->data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
					      int err)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	req->base.complete = rctx->complete;
	req->base.data = rctx->data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	return &rctx->desc;
}

static void cryptd_hash_complete(struct ahash_request *req, int err,
				 crypto_completion_t complete)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	ahash_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_init);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_update(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_update);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = crypto_shash_final(desc, req->result);

	cryptd_hash_complete(req, err, cryptd_hash_final);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_finup(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_finup);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_digest);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

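/*
 * Build the cryptd(...) ahash instance.  Besides being marked
 * CRYPTO_ALG_ASYNC it inherits the INTERNAL and OPTIONAL_KEY flags of the
 * wrapped shash, and setkey is only installed if the underlying algorithm
 * actually has one.
 */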
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.init_tfm = cryptd_hash_init_tfm;
	inst->alg.clone_tfm = cryptd_hash_clone_tfm;
	inst->alg.exit_tfm = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

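/*
 * Unlike the skcipher and hash paths, this helper both runs the operation on
 * the synchronous child and completes the request: it restores the caller's
 * callback, invokes the child's encrypt/decrypt, completes the parent
 * request, and then either re-arms the callbacks (backlog -EINPROGRESS
 * notification) or drops the reference taken at enqueue time.
 */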
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child, int err,
			      int (*crypt)(struct aead_request *req),
			      crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx;
	struct aead_request *subreq;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	subreq = &rctx->req;
	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				  NULL, NULL);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	err = crypt(subreq);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	aead_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = compl;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
			  cryptd_aead_encrypt);
}

static void cryptd_aead_decrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
			  cryptd_aead_decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
	struct aead_request *subreq = &rctx->req;

	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, sizeof(struct cryptd_aead_request_ctx) +
		     crypto_aead_reqsize(cipher));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

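/*
 * A single global queue is shared by all cryptd instances; cryptd_create()
 * dispatches on the type of the algorithm being wrapped.
 */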
static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_LSKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd",
				    WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE | WQ_PERCPU,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");