/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 1000

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}
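/*
 * Queue a request on the current CPU's queue and kick that CPU's worker.
 * If the transform is reference-counted (it was obtained through one of the
 * cryptd_alloc_*() helpers below), take an extra reference for the in-flight
 * request so that cryptd_free_*() cannot release it while work is pending;
 * the reference is dropped again when the request completes.
 */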
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable prevents this worker from being
	 * preempted by cryptd_enqueue_request(); local_bh_disable/enable
	 * prevents cryptd_enqueue_request() from accessing the queue from
	 * softirq context while we hold it.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}
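/*
 * Workqueue completion callbacks for the blkcipher path: each one runs the
 * synchronous child transform and then completes the original asynchronous
 * request.
 */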
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}
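/*
 * Build a "cryptd(...)" ablkcipher instance around a synchronous blkcipher.
 * The wrapper advertises CRYPTO_ALG_ASYNC and defers the real work to the
 * per-CPU queue; the child's key sizes, IV size and geniv are inherited
 * unchanged.
 */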
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}
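/*
 * skcipher workqueue callbacks: issue the request synchronously on the child
 * skcipher via an on-stack subrequest, then complete the original request
 * with the callback that the caller installed.
 */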
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}
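/*
 * Build a "cryptd(...)" skcipher instance: grab the named child skcipher,
 * copy its ivsize, chunksize and key limits, and mark the wrapper
 * CRYPTO_ALG_ASYNC (propagating CRYPTO_ALG_INTERNAL if the child is
 * internal-only).
 */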
static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}
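/*
 * Common completion path for the hash callbacks below: invoke the original
 * completion with bottom halves disabled and, if the tfm is
 * reference-counted, drop the in-flight reference that
 * cryptd_enqueue_request() took.
 */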
static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
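/*
 * export/import run synchronously in the caller's context: they only copy
 * the partial shash state in or out of the per-request descriptor, so there
 * is nothing to defer to the workqueue.
 */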
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}
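/*
 * Run one AEAD request on the child transform from workqueue context.  The
 * request's tfm is switched to the child before crypt() is called, so the
 * completion callback and the parent tfm are saved beforehand and used for
 * the final callback and refcount handling.
 */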
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}
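/*
 * Build a "cryptd(...)" AEAD instance around the named child AEAD, keeping
 * its IV size and maximum authentication tag size while advertising the
 * wrapper as asynchronous.
 */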
static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
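/*
 * A tfm allocated with cryptd_alloc_ablkcipher() starts with refcnt == 1 and
 * every queued request takes an extra reference, so a value above one means
 * requests are still pending on the workqueue.  The same convention is used
 * by the skcipher, ahash and AEAD *_queued() helpers below.
 */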
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
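/*
 * The cryptd_free_*() helpers drop the caller's reference; the underlying
 * tfm is only destroyed once the last in-flight request has completed and
 * released its reference as well.
 */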
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");