// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max queue depth");

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        /*
         * Protected by disabling BH to allow enqueueing from softirq and
         * dequeuing from kworker (cryptd_queue_worker()).
         */
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
        refcount_t refcnt;
        struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        struct skcipher_request req;
};

struct cryptd_hash_ctx {
        refcount_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        void *data;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        refcount_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        struct aead_request req;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}
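
/*
 * Enqueue a request on the calling CPU's queue and kick that CPU's work item
 * so cryptd_queue_worker() processes it on the same CPU.  BH is disabled
 * while the queue is touched so softirq and workqueue contexts cannot race.
 * A full queue (-ENOSPC) rejects the request outright.  For transforms
 * obtained via cryptd_alloc_*(), each queued request also takes a reference
 * so cryptd_free_*() cannot release the tfm while work is outstanding.
 */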

static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int err;
        struct cryptd_cpu_queue *cpu_queue;
        refcount_t *refcnt;

        local_bh_disable();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        if (err == -ENOSPC)
                goto out;

        queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

        if (!refcount_read(refcnt))
                goto out;

        refcount_inc(refcnt);

out:
        local_bh_enable();

        return err;
}

/*
 * Called in workqueue context, do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging crypto workqueue.
         */
        local_bh_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                crypto_request_complete(backlog, -EINPROGRESS);
        crypto_request_complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
                                 u32 *type, u32 *mask)
{
        /*
         * cryptd is allowed to wrap internal algorithms, but in that case the
         * resulting cryptd instance will be marked as internal as well.
         */
        *type = algt->type & CRYPTO_ALG_INTERNAL;
        *mask = algt->mask & CRYPTO_ALG_INTERNAL;

        /* No point in cryptd wrapping an algorithm that's already async. */
        *mask |= CRYPTO_ALG_ASYNC;

        *mask |= crypto_algt_inherited_mask(algt);
}
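
/*
 * Instances are named "cryptd(<driver-name>)" and registered at the wrapped
 * algorithm's priority plus 50, so that, once instantiated, the async wrapper
 * outranks the underlying synchronous implementation for lookups by the base
 * algorithm name.
 */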

static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child = ctx->child;

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child,
                                  crypto_skcipher_get_flags(parent) &
                                  CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(child, key, keylen);
}

static struct skcipher_request *cryptd_skcipher_prepare(
        struct skcipher_request *req, int err)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq = &rctx->req;
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *child;

        req->base.complete = subreq->base.complete;
        req->base.data = subreq->base.data;

        if (unlikely(err == -EINPROGRESS))
                return NULL;

        ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        child = ctx->child;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        return subreq;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
                                     crypto_completion_t complete)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_request *subreq = &rctx->req;
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        skcipher_request_complete(req, err);
        local_bh_enable();

        if (unlikely(err == -EINPROGRESS)) {
                subreq->base.complete = req->base.complete;
                subreq->base.data = req->base.data;
                req->base.complete = complete;
                req->base.data = req;
        } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct skcipher_request *subreq;

        subreq = cryptd_skcipher_prepare(req, err);
        if (likely(subreq))
                err = crypto_skcipher_encrypt(subreq);

        cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct skcipher_request *subreq;

        subreq = cryptd_skcipher_prepare(req, err);
        if (likely(subreq))
                err = crypto_skcipher_decrypt(subreq);

        cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}
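
/*
 * On enqueue, the caller's completion callback and data are parked in the
 * sub-request embedded in the request context while cryptd's own handler is
 * installed; cryptd_skcipher_prepare() restores them before the real
 * operation runs.  A backlog notification (err == -EINPROGRESS) is forwarded
 * to the caller and the callbacks are swapped back into place, since the
 * request will be completed a second time once it is actually processed.
 */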

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_request *subreq = &rctx->req;
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        subreq->base.complete = req->base.complete;
        subreq->base.data = req->base.data;
        req->base.complete = compl;
        req->base.data = req;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx) +
                     crypto_skcipher_reqsize(cipher));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
        kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct crypto_attr_type *algt,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
                                   crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_skcipher_free(inst);
        }
        return err;
}
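
/*
 * The hash wrapper turns a synchronous shash into an asynchronous ahash.
 * The shash descriptor lives in the ahash request context (sized in
 * cryptd_hash_init_tfm()), so export/import and the deferred
 * init/update/final/finup/digest handlers all operate on per-request state.
 */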

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                               CRYPTO_TFM_REQ_MASK);
        return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        rctx->data = req->base.data;
        req->base.complete = compl;
        req->base.data = req;

        return cryptd_enqueue_request(queue, &req->base);
}

static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
                                              int err)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        req->base.complete = rctx->complete;
        req->base.data = rctx->data;

        if (unlikely(err == -EINPROGRESS))
                return NULL;

        return &rctx->desc;
}

static void cryptd_hash_complete(struct ahash_request *req, int err,
                                 crypto_completion_t complete)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        ahash_request_complete(req, err);
        local_bh_enable();

        if (err == -EINPROGRESS) {
                req->base.complete = complete;
                req->base.data = req;
        } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct crypto_shash *child = ctx->child;
        struct shash_desc *desc;

        desc = cryptd_hash_prepare(req, err);
        if (unlikely(!desc))
                goto out;

        desc->tfm = child;

        err = crypto_shash_init(desc);

out:
        cryptd_hash_complete(req, err, cryptd_hash_init);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct shash_desc *desc;

        desc = cryptd_hash_prepare(req, err);
        if (likely(desc))
                err = shash_ahash_update(req, desc);

        cryptd_hash_complete(req, err, cryptd_hash_update);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct shash_desc *desc;

        desc = cryptd_hash_prepare(req, err);
        if (likely(desc))
                err = crypto_shash_final(desc, req->result);

        cryptd_hash_complete(req, err, cryptd_hash_final);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct shash_desc *desc;

        desc = cryptd_hash_prepare(req, err);
        if (likely(desc))
                err = shash_ahash_finup(req, desc);

        cryptd_hash_complete(req, err, cryptd_hash_finup);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct crypto_shash *child = ctx->child;
        struct shash_desc *desc;

        desc = cryptd_hash_prepare(req, err);
        if (unlikely(!desc))
                goto out;

        desc->tfm = child;

        err = shash_ahash_digest(req, desc);

out:
        cryptd_hash_complete(req, err, cryptd_hash_digest);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;

        return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
        struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

        crypto_drop_shash(&ctx->spawn);
        kfree(inst);
}
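
/*
 * The hash instance inherits CRYPTO_ALG_INTERNAL and CRYPTO_ALG_OPTIONAL_KEY
 * from the wrapped shash, and only exposes a setkey operation when the
 * underlying algorithm actually has one.
 */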

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
                                crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;
        alg = crypto_spawn_shash_alg(&ctx->spawn);

        err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
                                        CRYPTO_ALG_OPTIONAL_KEY));
        inst->alg.halg.digestsize = alg->digestsize;
        inst->alg.halg.statesize = alg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        if (crypto_shash_alg_has_setkey(alg))
                inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        inst->free = cryptd_hash_free;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_hash_free(inst);
        }
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child, int err,
                              int (*crypt)(struct aead_request *req),
                              crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx;
        struct aead_request *subreq;
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        subreq = &rctx->req;
        req->base.complete = subreq->base.complete;
        req->base.data = subreq->base.data;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        aead_request_set_tfm(subreq, child);
        aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                  NULL, NULL);
        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                               req->iv);
        aead_request_set_ad(subreq, req->assoclen);

        err = crypt(subreq);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        aead_request_complete(req, err);
        local_bh_enable();

        if (err == -EINPROGRESS) {
                subreq->base.complete = req->base.complete;
                subreq->base.data = req->base.data;
                req->base.complete = compl;
                req->base.data = req;
        } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
                          cryptd_aead_encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
                          cryptd_aead_decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
        struct aead_request *subreq = &rctx->req;

        subreq->base.complete = req->base.complete;
        subreq->base.data = req->base.data;
        req->base.complete = compl;
        req->base.data = req;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, sizeof(struct cryptd_aead_request_ctx) +
                     crypto_aead_reqsize(cipher));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
        struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

        crypto_drop_aead(&ctx->aead_spawn);
        kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
                               crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        inst->free = cryptd_aead_free;

        err = aead_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_aead_free(inst);
        }
        return err;
}
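
/*
 * A single set of per-CPU queues is shared by every cryptd instance; the
 * template constructor below simply dispatches on the requested algorithm
 * type.
 */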

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_SKCIPHER:
                return cryptd_create_skcipher(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_HASH:
                return cryptd_create_hash(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, algt, &queue);
        }

        return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .module = THIS_MODULE,
};
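
/*
 * The helpers below form the exported cryptd API, used by callers that need
 * an asynchronous front end for a synchronous implementation (e.g. the
 * crypto SIMD helpers).  Illustrative sketch only; the algorithm name is
 * just an example and error handling is trimmed:
 *
 *	struct cryptd_ahash *ctfm;
 *
 *	ctfm = cryptd_alloc_ahash("sha256-generic", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	... issue ahash requests against &ctfm->base; they are processed
 *	asynchronously on the cryptd workqueue, and cryptd_ahash_child()
 *	exposes the wrapped shash for callers that may run it directly ...
 *
 *	cryptd_free_ahash(ctfm);
 */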

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);

        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);

        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;

        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
        int err;

        cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
                                    1);
        if (!cryptd_wq)
                return -ENOMEM;

        err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
        if (err)
                goto err_destroy_wq;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                goto err_fini_queue;

        return 0;

err_fini_queue:
        cryptd_fini_queue(&queue);
err_destroy_wq:
        destroy_workqueue(cryptd_wq);
        return err;
}

static void __exit cryptd_exit(void)
{
        destroy_workqueue(cryptd_wq);
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");