cryptd.c: 1cac2cbc76b9f3fce0d4ccc374e724e7f2533a47 (old) vs. 254eff771441f4ee7aa9cf770a6e4820492c9dab (new)
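
Summary of the change: the single cryptd kernel thread, which drained one global crypto_queue under a spinlock/mutex pair, is replaced by per-CPU queues. Each CPU gets its own crypto_queue plus a work_struct; a request is queued on the CPU that submitted it, and the work item runs on the shared crypto workqueue (kcrypto_wq), completing one request per invocation and requeueing itself while requests remain.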
```diff
 /*
  * Software async crypto daemon.
  *
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  *
  */
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
 #include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
-#define CRYPTD_MAX_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 100
 
-struct cryptd_state {
-	spinlock_t lock;
-	struct mutex mutex;
+struct cryptd_cpu_queue {
 	struct crypto_queue queue;
-	struct task_struct *task;
+	struct work_struct work;
 };
 
+struct cryptd_queue {
+	struct cryptd_cpu_queue *cpu_queue;
+};
+
 struct cryptd_instance_ctx {
 	struct crypto_spawn spawn;
-	struct cryptd_state *state;
+	struct cryptd_queue *queue;
 };
 
 struct cryptd_blkcipher_ctx {
 	struct crypto_blkcipher *child;
 };
 
 struct cryptd_blkcipher_request_ctx {
 	crypto_completion_t complete;
 };
 
 struct cryptd_hash_ctx {
 	struct crypto_hash *child;
 };
 
 struct cryptd_hash_request_ctx {
 	crypto_completion_t complete;
 };
 
-static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
+static void cryptd_queue_worker(struct work_struct *work);
+
+static int cryptd_init_queue(struct cryptd_queue *queue,
+			     unsigned int max_cpu_qlen)
 {
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
+	if (!queue->cpu_queue)
+		return -ENOMEM;
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+	}
+	return 0;
+}
+
+static void cryptd_fini_queue(struct cryptd_queue *queue)
+{
+	int cpu;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	for_each_possible_cpu(cpu) {
+		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+		BUG_ON(cpu_queue->queue.qlen);
+	}
+	free_percpu(queue->cpu_queue);
+}
+
+static int cryptd_enqueue_request(struct cryptd_queue *queue,
+				  struct crypto_async_request *request)
+{
+	int cpu, err;
+	struct cryptd_cpu_queue *cpu_queue;
+
+	cpu = get_cpu();
+	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+	err = crypto_enqueue_request(&cpu_queue->queue, request);
+	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+	put_cpu();
+
+	return err;
+}
+
+/* Called in workqueue context, do one real cryption work (via
+ * req->complete) and reschedule itself if there are more work to
+ * do. */
+static void cryptd_queue_worker(struct work_struct *work)
+{
+	struct cryptd_cpu_queue *cpu_queue;
+	struct crypto_async_request *req, *backlog;
+
+	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+	/* Only handle one request at a time to avoid hogging crypto
+	 * workqueue. preempt_disable/enable is used to prevent
+	 * being preempted by cryptd_enqueue_request() */
+	preempt_disable();
+	backlog = crypto_get_backlog(&cpu_queue->queue);
+	req = crypto_dequeue_request(&cpu_queue->queue);
+	preempt_enable();
+
+	if (!req)
+		return;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+	req->complete(req, 0);
+
+	if (cpu_queue->queue.qlen)
+		queue_work(kcrypto_wq, &cpu_queue->work);
+}
+
+static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
+{
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-	return ictx->state;
+	return ictx->queue;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
 				   const u8 *key, unsigned int keylen)
 {
 	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
 	struct crypto_blkcipher *child = ctx->child;
 	int err;
--- 55 unchanged lines hidden ---
```
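
The enqueue/worker pairing added above is worth seeing in isolation. Below is a minimal, self-contained sketch of the same shape built on only the generic workqueue API; all names (`pcpu_funnel`, `funnel_submit`, and so on) are hypothetical, and where cryptd gets away with `preempt_disable()` because each queue is only ever touched from its own CPU, the sketch uses an explicit spinlock to stay obviously correct:

```c
/*
 * Per-CPU "funnel" sketch of the pattern cryptd uses above.
 * Hypothetical names; cryptd itself uses preempt_disable() plus
 * queue_work_on() on kcrypto_wq instead of the spinlock used here.
 */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct funnel_item {
	struct list_head node;
	void (*fn)(struct funnel_item *item);	/* the actual work */
};

struct pcpu_funnel {
	struct list_head items;
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct pcpu_funnel, funnels);

/* Producer: queue on the submitting CPU and kick that CPU's work item. */
static void funnel_submit(struct funnel_item *item)
{
	struct pcpu_funnel *f;
	int cpu;

	cpu = get_cpu();	/* stay on this CPU while picking its queue */
	f = &per_cpu(funnels, cpu);
	spin_lock_bh(&f->lock);
	list_add_tail(&item->node, &f->items);
	spin_unlock_bh(&f->lock);
	schedule_work_on(cpu, &f->work);
	put_cpu();
}

/* Consumer: handle exactly one item, then requeue while items remain. */
static void funnel_worker(struct work_struct *work)
{
	struct pcpu_funnel *f = container_of(work, struct pcpu_funnel, work);
	struct funnel_item *item = NULL;
	bool more;

	spin_lock_bh(&f->lock);
	if (!list_empty(&f->items)) {
		item = list_first_entry(&f->items, struct funnel_item, node);
		list_del(&item->node);
	}
	more = !list_empty(&f->items);
	spin_unlock_bh(&f->lock);

	if (item)
		item->fn(item);
	if (more)
		schedule_work(&f->work);	/* already on the right CPU */
}

static int __init funnel_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pcpu_funnel *f = &per_cpu(funnels, cpu);

		INIT_LIST_HEAD(&f->items);
		spin_lock_init(&f->lock);
		INIT_WORK(&f->work, funnel_worker);
	}
	return 0;
}
module_init(funnel_init);
MODULE_LICENSE("GPL");
```

Handling one item per work invocation, as cryptd_queue_worker() does, keeps a long backlog from monopolizing the workqueue thread; requeueing the work item lets other queued work interleave.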
```diff
 	crypto_blkcipher_crt(child)->decrypt);
 }
 
 static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 				    crypto_completion_t complete)
 {
 	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cryptd_state *state =
-		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
-	int err;
+	struct cryptd_queue *queue;
 
+	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
 	rctx->complete = req->base.complete;
 	req->base.complete = complete;
 
-	spin_lock_bh(&state->lock);
-	err = ablkcipher_enqueue_request(&state->queue, req);
-	spin_unlock_bh(&state->lock);
-
-	wake_up_process(state->task);
-	return err;
+	return cryptd_enqueue_request(queue, &req->base);
 }
 
 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
 {
 	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
 }
 
 static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
--- 17 unchanged lines hidden ---
 	tfm->crt_ablkcipher.reqsize =
 		sizeof(struct cryptd_blkcipher_request_ctx);
 	return 0;
 }
 
 static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct cryptd_state *state = cryptd_get_state(tfm);
-	int active;
 
-	mutex_lock(&state->mutex);
-	active = ablkcipher_tfm_in_queue(&state->queue,
-					 __crypto_ablkcipher_cast(tfm));
-	mutex_unlock(&state->mutex);
-
-	BUG_ON(active);
-
 	crypto_free_blkcipher(ctx->child);
 }
```
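
Note the simplification in the exit path: the kthread variant had to assert, under the mutex, that no request for the dying transform was still parked in the one shared queue (the BUG_ON(active) removed above, and again in cryptd_hash_exit_tfm() further down); the per-CPU variant drops the per-tfm check and instead has cryptd_fini_queue() assert at module teardown that every per-CPU queue has drained.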
```diff
 static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
-						     struct cryptd_state *state)
+						     struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct cryptd_instance_ctx *ctx;
 	int err;
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst) {
 		inst = ERR_PTR(-ENOMEM);
--- 6 unchanged lines hidden ---
 		goto out_free_inst;
 
 	ctx = crypto_instance_ctx(inst);
 	err = crypto_init_spawn(&ctx->spawn, alg, inst,
 				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
 	if (err)
 		goto out_free_inst;
 
-	ctx->state = state;
+	ctx->queue = queue;
 
 	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 
 	inst->alg.cra_priority = alg->cra_priority + 50;
 	inst->alg.cra_blocksize = alg->cra_blocksize;
 	inst->alg.cra_alignmask = alg->cra_alignmask;
 
 out:
 	return inst;
 
 out_free_inst:
 	kfree(inst);
 	inst = ERR_PTR(err);
 	goto out;
 }
 
 static struct crypto_instance *cryptd_alloc_blkcipher(
-	struct rtattr **tb, struct cryptd_state *state)
+	struct rtattr **tb, struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
 
 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
 				  CRYPTO_ALG_TYPE_MASK);
 	if (IS_ERR(alg))
 		return ERR_CAST(alg);
 
-	inst = cryptd_alloc_instance(alg, state);
+	inst = cryptd_alloc_instance(alg, queue);
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
 	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 	inst->alg.cra_type = &crypto_ablkcipher_type;
 
 	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
 	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
--- 31 unchanged lines hidden ---
 	tfm->crt_ahash.reqsize =
 		sizeof(struct cryptd_hash_request_ctx);
 	return 0;
 }
 
 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct cryptd_state *state = cryptd_get_state(tfm);
-	int active;
 
-	mutex_lock(&state->mutex);
-	active = ahash_tfm_in_queue(&state->queue,
-				    __crypto_ahash_cast(tfm));
-	mutex_unlock(&state->mutex);
-
-	BUG_ON(active);
-
 	crypto_free_hash(ctx->child);
 }
 
 static int cryptd_hash_setkey(struct crypto_ahash *parent,
 			      const u8 *key, unsigned int keylen)
 {
 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
 	struct crypto_hash *child = ctx->child;
--- 8 unchanged lines hidden ---
 	return err;
 }
 
 static int cryptd_hash_enqueue(struct ahash_request *req,
 			       crypto_completion_t complete)
 {
 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct cryptd_state *state =
-		cryptd_get_state(crypto_ahash_tfm(tfm));
-	int err;
+	struct cryptd_queue *queue =
+		cryptd_get_queue(crypto_ahash_tfm(tfm));
 
 	rctx->complete = req->base.complete;
 	req->base.complete = complete;
 
-	spin_lock_bh(&state->lock);
-	err = ahash_enqueue_request(&state->queue, req);
-	spin_unlock_bh(&state->lock);
-
-	wake_up_process(state->task);
-	return err;
+	return cryptd_enqueue_request(queue, &req->base);
 }
 
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 {
 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
 	struct crypto_hash *child = ctx->child;
 	struct ahash_request *req = ahash_request_cast(req_async);
 	struct cryptd_hash_request_ctx *rctx;
--- 116 unchanged lines hidden ---
 }
 
 static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 {
 	return cryptd_hash_enqueue(req, cryptd_hash_digest);
 }
 
 static struct crypto_instance *cryptd_alloc_hash(
-	struct rtattr **tb, struct cryptd_state *state)
+	struct rtattr **tb, struct cryptd_queue *queue)
 {
 	struct crypto_instance *inst;
 	struct crypto_alg *alg;
 
 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
 				  CRYPTO_ALG_TYPE_HASH_MASK);
 	if (IS_ERR(alg))
 		return ERR_PTR(PTR_ERR(alg));
 
-	inst = cryptd_alloc_instance(alg, state);
+	inst = cryptd_alloc_instance(alg, queue);
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
 	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
 	inst->alg.cra_type = &crypto_ahash_type;
 
 	inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
 	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
--- 7 unchanged lines hidden ---
 	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
 	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
 
 out_put_alg:
 	crypto_mod_put(alg);
 	return inst;
 }
 
-static struct cryptd_state state;
+static struct cryptd_queue queue;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 		return ERR_CAST(algt);
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		return cryptd_alloc_blkcipher(tb, &state);
+		return cryptd_alloc_blkcipher(tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
-		return cryptd_alloc_hash(tb, &state);
+		return cryptd_alloc_hash(tb, &queue);
 	}
 
 	return ERR_PTR(-EINVAL);
 }
 
 static void cryptd_free(struct crypto_instance *inst)
 {
 	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
--- 38 unchanged lines hidden ---
 EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
 	crypto_free_ablkcipher(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
```
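
The exports visible above belong to cryptd's small consumer-facing API; the allocator itself, cryptd_alloc_ablkcipher(), sits in the hidden unchanged region. A minimal consumer sketch follows, assuming the allocator takes the child algorithm's name and wraps it in the "cryptd(...)" template (the demo module name and flow are hypothetical):

```c
#include <crypto/cryptd.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>

/* Sketch only: wrap the synchronous cbc(aes) blkcipher in cryptd so
 * that encryption runs asynchronously from a per-CPU work item. */
static int __init cryptd_demo_init(void)
{
	struct cryptd_ablkcipher *tfm;

	tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* &tfm->base is an ordinary crypto_ablkcipher: set a key,
	 * allocate an ablkcipher_request, and submit it as with any
	 * async cipher.  The request is queued on the submitting CPU
	 * and completed from cryptd_queue_worker(). */

	/* The wrapped synchronous tfm stays reachable for callers
	 * that sometimes want to run inline: */
	(void)cryptd_ablkcipher_child(tfm);

	cryptd_free_ablkcipher(tfm);
	return 0;
}

static void __exit cryptd_demo_exit(void)
{
}

module_init(cryptd_demo_init);
module_exit(cryptd_demo_exit);
MODULE_LICENSE("GPL");
```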
```diff
-static inline int cryptd_create_thread(struct cryptd_state *state,
-				       int (*fn)(void *data), const char *name)
-{
-	spin_lock_init(&state->lock);
-	mutex_init(&state->mutex);
-	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
-
-	state->task = kthread_run(fn, state, name);
-	if (IS_ERR(state->task))
-		return PTR_ERR(state->task);
-
-	return 0;
-}
-
-static inline void cryptd_stop_thread(struct cryptd_state *state)
-{
-	BUG_ON(state->queue.qlen);
-	kthread_stop(state->task);
-}
-
-static int cryptd_thread(void *data)
-{
-	struct cryptd_state *state = data;
-	int stop;
-
-	current->flags |= PF_NOFREEZE;
-
-	do {
-		struct crypto_async_request *req, *backlog;
-
-		mutex_lock(&state->mutex);
-		__set_current_state(TASK_INTERRUPTIBLE);
-
-		spin_lock_bh(&state->lock);
-		backlog = crypto_get_backlog(&state->queue);
-		req = crypto_dequeue_request(&state->queue);
-		spin_unlock_bh(&state->lock);
-
-		stop = kthread_should_stop();
-
-		if (stop || req) {
-			__set_current_state(TASK_RUNNING);
-			if (req) {
-				if (backlog)
-					backlog->complete(backlog,
-							  -EINPROGRESS);
-				req->complete(req, 0);
-			}
-		}
-
-		mutex_unlock(&state->mutex);
-
-		schedule();
-	} while (!stop);
-
-	return 0;
-}
-
 static int __init cryptd_init(void)
 {
 	int err;
 
-	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
+	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
 	if (err)
 		return err;
 
 	err = crypto_register_template(&cryptd_tmpl);
 	if (err)
-		kthread_stop(state.task);
+		cryptd_fini_queue(&queue);
 
 	return err;
 }
 
 static void __exit cryptd_exit(void)
 {
-	cryptd_stop_thread(&state);
+	cryptd_fini_queue(&queue);
 	crypto_unregister_template(&cryptd_tmpl);
 }
 
 module_init(cryptd_init);
 module_exit(cryptd_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Software async crypto daemon");
```