xref: /linux/crypto/cryptd.c (revision 215a0d305c5651928eb67c96bcedd0a6c297dfce)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Software async crypto daemon.
4  *
5  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
6  *
7  * Added AEAD support to cryptd.
8  *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
9  *             Adrian Hoban <adrian.hoban@intel.com>
10  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
11  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
12  *    Copyright (c) 2010, Intel Corporation.
13  */
14 
15 #include <crypto/internal/hash.h>
16 #include <crypto/internal/aead.h>
17 #include <crypto/internal/skcipher.h>
18 #include <crypto/cryptd.h>
19 #include <crypto/crypto_wq.h>
20 #include <linux/atomic.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/kernel.h>
24 #include <linux/list.h>
25 #include <linux/module.h>
26 #include <linux/scatterlist.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 
30 static unsigned int cryptd_max_cpu_qlen = 1000;
31 module_param(cryptd_max_cpu_qlen, uint, 0);
32 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max per-CPU queue depth");
33 
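/*
 * Each CPU owns its own request queue and work item: requests are queued
 * on the submitting CPU and later processed on that same CPU by
 * cryptd_queue_worker() via the crypto workqueue.
 */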
34 struct cryptd_cpu_queue {
35 	struct crypto_queue queue;
36 	struct work_struct work;
37 };
38 
39 struct cryptd_queue {
40 	struct cryptd_cpu_queue __percpu *cpu_queue;
41 };
42 
43 struct cryptd_instance_ctx {
44 	struct crypto_spawn spawn;
45 	struct cryptd_queue *queue;
46 };
47 
48 struct skcipherd_instance_ctx {
49 	struct crypto_skcipher_spawn spawn;
50 	struct cryptd_queue *queue;
51 };
52 
53 struct hashd_instance_ctx {
54 	struct crypto_shash_spawn spawn;
55 	struct cryptd_queue *queue;
56 };
57 
58 struct aead_instance_ctx {
59 	struct crypto_aead_spawn aead_spawn;
60 	struct cryptd_queue *queue;
61 };
62 
63 struct cryptd_skcipher_ctx {
64 	atomic_t refcnt;
65 	struct crypto_sync_skcipher *child;
66 };
67 
68 struct cryptd_skcipher_request_ctx {
69 	crypto_completion_t complete;
70 };
71 
72 struct cryptd_hash_ctx {
73 	atomic_t refcnt;
74 	struct crypto_shash *child;
75 };
76 
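/*
 * The per-request hash context keeps both the caller's original completion
 * callback and a shash_desc, so the child shash state survives across the
 * init/update/final/finup steps that are individually deferred to the
 * workqueue.
 */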
77 struct cryptd_hash_request_ctx {
78 	crypto_completion_t complete;
79 	struct shash_desc desc;
80 };
81 
82 struct cryptd_aead_ctx {
83 	atomic_t refcnt;
84 	struct crypto_aead *child;
85 };
86 
87 struct cryptd_aead_request_ctx {
88 	crypto_completion_t complete;
89 };
90 
91 static void cryptd_queue_worker(struct work_struct *work);
92 
93 static int cryptd_init_queue(struct cryptd_queue *queue,
94 			     unsigned int max_cpu_qlen)
95 {
96 	int cpu;
97 	struct cryptd_cpu_queue *cpu_queue;
98 
99 	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
100 	if (!queue->cpu_queue)
101 		return -ENOMEM;
102 	for_each_possible_cpu(cpu) {
103 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
104 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
105 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
106 	}
107 	pr_info("cryptd: max_cpu_qlen set to %u\n", max_cpu_qlen);
108 	return 0;
109 }
110 
111 static void cryptd_fini_queue(struct cryptd_queue *queue)
112 {
113 	int cpu;
114 	struct cryptd_cpu_queue *cpu_queue;
115 
116 	for_each_possible_cpu(cpu) {
117 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
118 		BUG_ON(cpu_queue->queue.qlen);
119 	}
120 	free_percpu(queue->cpu_queue);
121 }
122 
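/*
 * Queue a request on the submitting CPU's queue and kick that CPU's worker.
 * The first member of every cryptd context is a refcount; it is bumped here
 * only when it is already non-zero, i.e. when the tfm was obtained through
 * one of the cryptd_alloc_*() helpers below, so the tfm cannot go away while
 * requests are still pending.
 */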
123 static int cryptd_enqueue_request(struct cryptd_queue *queue,
124 				  struct crypto_async_request *request)
125 {
126 	int cpu, err;
127 	struct cryptd_cpu_queue *cpu_queue;
128 	atomic_t *refcnt;
129 
130 	cpu = get_cpu();
131 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
132 	err = crypto_enqueue_request(&cpu_queue->queue, request);
133 
134 	refcnt = crypto_tfm_ctx(request->tfm);
135 
136 	if (err == -ENOSPC)
137 		goto out_put_cpu;
138 
139 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
140 
141 	if (!atomic_read(refcnt))
142 		goto out_put_cpu;
143 
144 	atomic_inc(refcnt);
145 
146 out_put_cpu:
147 	put_cpu();
148 
149 	return err;
150 }
151 
152 /* Called in workqueue context: do one unit of real crypto work (via
153  * req->complete) and reschedule ourselves if there is more work to
154  * do. */
155 static void cryptd_queue_worker(struct work_struct *work)
156 {
157 	struct cryptd_cpu_queue *cpu_queue;
158 	struct crypto_async_request *req, *backlog;
159 
160 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
161 	/*
162 	 * Only handle one request at a time to avoid hogging the crypto
163 	 * workqueue.  preempt_disable()/enable() prevents preemption by
164 	 * cryptd_enqueue_request(); local_bh_disable()/enable() keeps
165 	 * cryptd_enqueue_request() from running in softirq context on this CPU.
166 	 */
167 	local_bh_disable();
168 	preempt_disable();
169 	backlog = crypto_get_backlog(&cpu_queue->queue);
170 	req = crypto_dequeue_request(&cpu_queue->queue);
171 	preempt_enable();
172 	local_bh_enable();
173 
174 	if (!req)
175 		return;
176 
177 	if (backlog)
178 		backlog->complete(backlog, -EINPROGRESS);
179 	req->complete(req, 0);
180 
181 	if (cpu_queue->queue.qlen)
182 		queue_work(kcrypto_wq, &cpu_queue->work);
183 }
184 
185 static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
186 {
187 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
188 	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
189 	return ictx->queue;
190 }
191 
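/*
 * Carry the CRYPTO_ALG_INTERNAL bit over from the template parameters so
 * that "internal" implementations (helpers not meant for direct use) can be
 * wrapped without being exposed by accident.
 */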
192 static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
193 					 u32 *mask)
194 {
195 	struct crypto_attr_type *algt;
196 
197 	algt = crypto_get_attr_type(tb);
198 	if (IS_ERR(algt))
199 		return;
200 
201 	*type |= algt->type & CRYPTO_ALG_INTERNAL;
202 	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
203 }
204 
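/*
 * Name the instance "cryptd(<driver-name>)" and give it a priority 50 above
 * the wrapped algorithm, so the asynchronous wrapper wins lookups for the
 * same cra_name once it has been instantiated.
 */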
205 static int cryptd_init_instance(struct crypto_instance *inst,
206 				struct crypto_alg *alg)
207 {
208 	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
209 		     "cryptd(%s)",
210 		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
211 		return -ENAMETOOLONG;
212 
213 	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
214 
215 	inst->alg.cra_priority = alg->cra_priority + 50;
216 	inst->alg.cra_blocksize = alg->cra_blocksize;
217 	inst->alg.cra_alignmask = alg->cra_alignmask;
218 
219 	return 0;
220 }
221 
222 static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
223 				   unsigned int tail)
224 {
225 	char *p;
226 	struct crypto_instance *inst;
227 	int err;
228 
229 	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
230 	if (!p)
231 		return ERR_PTR(-ENOMEM);
232 
233 	inst = (void *)(p + head);
234 
235 	err = cryptd_init_instance(inst, alg);
236 	if (err)
237 		goto out_free_inst;
238 
239 out:
240 	return p;
241 
242 out_free_inst:
243 	kfree(p);
244 	p = ERR_PTR(err);
245 	goto out;
246 }
247 
248 static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
249 				  const u8 *key, unsigned int keylen)
250 {
251 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
252 	struct crypto_sync_skcipher *child = ctx->child;
253 	int err;
254 
255 	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
256 	crypto_sync_skcipher_set_flags(child,
257 				       crypto_skcipher_get_flags(parent) &
258 					 CRYPTO_TFM_REQ_MASK);
259 	err = crypto_sync_skcipher_setkey(child, key, keylen);
260 	crypto_skcipher_set_flags(parent,
261 				  crypto_sync_skcipher_get_flags(child) &
262 					  CRYPTO_TFM_RES_MASK);
263 	return err;
264 }
265 
266 static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
267 {
268 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
269 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
270 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
271 	int refcnt = atomic_read(&ctx->refcnt);
272 
273 	local_bh_disable();
274 	rctx->complete(&req->base, err);
275 	local_bh_enable();
276 
277 	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
278 		crypto_free_skcipher(tfm);
279 }
280 
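/*
 * Workqueue-side encrypt callback: run the synchronous child skcipher on
 * the caller's buffers, restore the caller's completion handler and report
 * the result through cryptd_skcipher_complete().  The decrypt callback
 * below mirrors this.
 */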
281 static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
282 				    int err)
283 {
284 	struct skcipher_request *req = skcipher_request_cast(base);
285 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
286 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
287 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
288 	struct crypto_sync_skcipher *child = ctx->child;
289 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
290 
291 	if (unlikely(err == -EINPROGRESS))
292 		goto out;
293 
294 	skcipher_request_set_sync_tfm(subreq, child);
295 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
296 				      NULL, NULL);
297 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
298 				   req->iv);
299 
300 	err = crypto_skcipher_encrypt(subreq);
301 	skcipher_request_zero(subreq);
302 
303 	req->base.complete = rctx->complete;
304 
305 out:
306 	cryptd_skcipher_complete(req, err);
307 }
308 
309 static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
310 				    int err)
311 {
312 	struct skcipher_request *req = skcipher_request_cast(base);
313 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
314 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
315 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
316 	struct crypto_sync_skcipher *child = ctx->child;
317 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
318 
319 	if (unlikely(err == -EINPROGRESS))
320 		goto out;
321 
322 	skcipher_request_set_sync_tfm(subreq, child);
323 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
324 				      NULL, NULL);
325 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
326 				   req->iv);
327 
328 	err = crypto_skcipher_decrypt(subreq);
329 	skcipher_request_zero(subreq);
330 
331 	req->base.complete = rctx->complete;
332 
333 out:
334 	cryptd_skcipher_complete(req, err);
335 }
336 
337 static int cryptd_skcipher_enqueue(struct skcipher_request *req,
338 				   crypto_completion_t compl)
339 {
340 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
341 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
342 	struct cryptd_queue *queue;
343 
344 	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
345 	rctx->complete = req->base.complete;
346 	req->base.complete = compl;
347 
348 	return cryptd_enqueue_request(queue, &req->base);
349 }
350 
351 static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
352 {
353 	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
354 }
355 
356 static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
357 {
358 	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
359 }
360 
361 static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
362 {
363 	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
364 	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
365 	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
366 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
367 	struct crypto_skcipher *cipher;
368 
369 	cipher = crypto_spawn_skcipher(spawn);
370 	if (IS_ERR(cipher))
371 		return PTR_ERR(cipher);
372 
373 	ctx->child = (struct crypto_sync_skcipher *)cipher;
374 	crypto_skcipher_set_reqsize(
375 		tfm, sizeof(struct cryptd_skcipher_request_ctx));
376 	return 0;
377 }
378 
379 static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
380 {
381 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
382 
383 	crypto_free_sync_skcipher(ctx->child);
384 }
385 
386 static void cryptd_skcipher_free(struct skcipher_instance *inst)
387 {
388 	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
389 
390 	crypto_drop_skcipher(&ctx->spawn);
391 	kfree(inst);
392 }
393 
394 static int cryptd_create_skcipher(struct crypto_template *tmpl,
395 				  struct rtattr **tb,
396 				  struct cryptd_queue *queue)
397 {
398 	struct skcipherd_instance_ctx *ctx;
399 	struct skcipher_instance *inst;
400 	struct skcipher_alg *alg;
401 	const char *name;
402 	u32 type;
403 	u32 mask;
404 	int err;
405 
406 	type = 0;
407 	mask = CRYPTO_ALG_ASYNC;
408 
409 	cryptd_check_internal(tb, &type, &mask);
410 
411 	name = crypto_attr_alg_name(tb[1]);
412 	if (IS_ERR(name))
413 		return PTR_ERR(name);
414 
415 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
416 	if (!inst)
417 		return -ENOMEM;
418 
419 	ctx = skcipher_instance_ctx(inst);
420 	ctx->queue = queue;
421 
422 	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
423 	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
424 	if (err)
425 		goto out_free_inst;
426 
427 	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
428 	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
429 	if (err)
430 		goto out_drop_skcipher;
431 
432 	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
433 				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
434 
435 	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
436 	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
437 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
438 	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
439 
440 	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
441 
442 	inst->alg.init = cryptd_skcipher_init_tfm;
443 	inst->alg.exit = cryptd_skcipher_exit_tfm;
444 
445 	inst->alg.setkey = cryptd_skcipher_setkey;
446 	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
447 	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
448 
449 	inst->free = cryptd_skcipher_free;
450 
451 	err = skcipher_register_instance(tmpl, inst);
452 	if (err) {
453 out_drop_skcipher:
454 		crypto_drop_skcipher(&ctx->spawn);
455 out_free_inst:
456 		kfree(inst);
457 	}
458 	return err;
459 }
460 
461 static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
462 {
463 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
464 	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
465 	struct crypto_shash_spawn *spawn = &ictx->spawn;
466 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
467 	struct crypto_shash *hash;
468 
469 	hash = crypto_spawn_shash(spawn);
470 	if (IS_ERR(hash))
471 		return PTR_ERR(hash);
472 
473 	ctx->child = hash;
474 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
475 				 sizeof(struct cryptd_hash_request_ctx) +
476 				 crypto_shash_descsize(hash));
477 	return 0;
478 }
479 
480 static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
481 {
482 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
483 
484 	crypto_free_shash(ctx->child);
485 }
486 
487 static int cryptd_hash_setkey(struct crypto_ahash *parent,
488 				   const u8 *key, unsigned int keylen)
489 {
490 	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
491 	struct crypto_shash *child = ctx->child;
492 	int err;
493 
494 	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
495 	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
496 				      CRYPTO_TFM_REQ_MASK);
497 	err = crypto_shash_setkey(child, key, keylen);
498 	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
499 				       CRYPTO_TFM_RES_MASK);
500 	return err;
501 }
502 
503 static int cryptd_hash_enqueue(struct ahash_request *req,
504 				crypto_completion_t compl)
505 {
506 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
507 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
508 	struct cryptd_queue *queue =
509 		cryptd_get_queue(crypto_ahash_tfm(tfm));
510 
511 	rctx->complete = req->base.complete;
512 	req->base.complete = compl;
513 
514 	return cryptd_enqueue_request(queue, &req->base);
515 }
516 
517 static void cryptd_hash_complete(struct ahash_request *req, int err)
518 {
519 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
520 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
521 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
522 	int refcnt = atomic_read(&ctx->refcnt);
523 
524 	local_bh_disable();
525 	rctx->complete(&req->base, err);
526 	local_bh_enable();
527 
528 	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
529 		crypto_free_ahash(tfm);
530 }
531 
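/*
 * The cryptd_hash_{init,update,final,finup,digest}() callbacks below run
 * from the workqueue and drive the synchronous child shash through the
 * shash_desc stored in the request context.
 */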
532 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
533 {
534 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
535 	struct crypto_shash *child = ctx->child;
536 	struct ahash_request *req = ahash_request_cast(req_async);
537 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
538 	struct shash_desc *desc = &rctx->desc;
539 
540 	if (unlikely(err == -EINPROGRESS))
541 		goto out;
542 
543 	desc->tfm = child;
544 
545 	err = crypto_shash_init(desc);
546 
547 	req->base.complete = rctx->complete;
548 
549 out:
550 	cryptd_hash_complete(req, err);
551 }
552 
553 static int cryptd_hash_init_enqueue(struct ahash_request *req)
554 {
555 	return cryptd_hash_enqueue(req, cryptd_hash_init);
556 }
557 
558 static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
559 {
560 	struct ahash_request *req = ahash_request_cast(req_async);
561 	struct cryptd_hash_request_ctx *rctx;
562 
563 	rctx = ahash_request_ctx(req);
564 
565 	if (unlikely(err == -EINPROGRESS))
566 		goto out;
567 
568 	err = shash_ahash_update(req, &rctx->desc);
569 
570 	req->base.complete = rctx->complete;
571 
572 out:
573 	cryptd_hash_complete(req, err);
574 }
575 
576 static int cryptd_hash_update_enqueue(struct ahash_request *req)
577 {
578 	return cryptd_hash_enqueue(req, cryptd_hash_update);
579 }
580 
581 static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
582 {
583 	struct ahash_request *req = ahash_request_cast(req_async);
584 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
585 
586 	if (unlikely(err == -EINPROGRESS))
587 		goto out;
588 
589 	err = crypto_shash_final(&rctx->desc, req->result);
590 
591 	req->base.complete = rctx->complete;
592 
593 out:
594 	cryptd_hash_complete(req, err);
595 }
596 
597 static int cryptd_hash_final_enqueue(struct ahash_request *req)
598 {
599 	return cryptd_hash_enqueue(req, cryptd_hash_final);
600 }
601 
602 static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
603 {
604 	struct ahash_request *req = ahash_request_cast(req_async);
605 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
606 
607 	if (unlikely(err == -EINPROGRESS))
608 		goto out;
609 
610 	err = shash_ahash_finup(req, &rctx->desc);
611 
612 	req->base.complete = rctx->complete;
613 
614 out:
615 	cryptd_hash_complete(req, err);
616 }
617 
618 static int cryptd_hash_finup_enqueue(struct ahash_request *req)
619 {
620 	return cryptd_hash_enqueue(req, cryptd_hash_finup);
621 }
622 
623 static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
624 {
625 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
626 	struct crypto_shash *child = ctx->child;
627 	struct ahash_request *req = ahash_request_cast(req_async);
628 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
629 	struct shash_desc *desc = &rctx->desc;
630 
631 	if (unlikely(err == -EINPROGRESS))
632 		goto out;
633 
634 	desc->tfm = child;
635 
636 	err = shash_ahash_digest(req, desc);
637 
638 	req->base.complete = rctx->complete;
639 
640 out:
641 	cryptd_hash_complete(req, err);
642 }
643 
644 static int cryptd_hash_digest_enqueue(struct ahash_request *req)
645 {
646 	return cryptd_hash_enqueue(req, cryptd_hash_digest);
647 }
648 
649 static int cryptd_hash_export(struct ahash_request *req, void *out)
650 {
651 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
652 
653 	return crypto_shash_export(&rctx->desc, out);
654 }
655 
656 static int cryptd_hash_import(struct ahash_request *req, const void *in)
657 {
658 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
659 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
660 	struct shash_desc *desc = cryptd_shash_desc(req);
661 
662 	desc->tfm = ctx->child;
663 
664 	return crypto_shash_import(desc, in);
665 }
666 
667 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
668 			      struct cryptd_queue *queue)
669 {
670 	struct hashd_instance_ctx *ctx;
671 	struct ahash_instance *inst;
672 	struct shash_alg *salg;
673 	struct crypto_alg *alg;
674 	u32 type = 0;
675 	u32 mask = 0;
676 	int err;
677 
678 	cryptd_check_internal(tb, &type, &mask);
679 
680 	salg = shash_attr_alg(tb[1], type, mask);
681 	if (IS_ERR(salg))
682 		return PTR_ERR(salg);
683 
684 	alg = &salg->base;
685 	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
686 				     sizeof(*ctx));
687 	err = PTR_ERR(inst);
688 	if (IS_ERR(inst))
689 		goto out_put_alg;
690 
691 	ctx = ahash_instance_ctx(inst);
692 	ctx->queue = queue;
693 
694 	err = crypto_init_shash_spawn(&ctx->spawn, salg,
695 				      ahash_crypto_instance(inst));
696 	if (err)
697 		goto out_free_inst;
698 
699 	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
700 		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
701 				   CRYPTO_ALG_OPTIONAL_KEY));
702 
703 	inst->alg.halg.digestsize = salg->digestsize;
704 	inst->alg.halg.statesize = salg->statesize;
705 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
706 
707 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
708 	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
709 
710 	inst->alg.init   = cryptd_hash_init_enqueue;
711 	inst->alg.update = cryptd_hash_update_enqueue;
712 	inst->alg.final  = cryptd_hash_final_enqueue;
713 	inst->alg.finup  = cryptd_hash_finup_enqueue;
714 	inst->alg.export = cryptd_hash_export;
715 	inst->alg.import = cryptd_hash_import;
716 	if (crypto_shash_alg_has_setkey(salg))
717 		inst->alg.setkey = cryptd_hash_setkey;
718 	inst->alg.digest = cryptd_hash_digest_enqueue;
719 
720 	err = ahash_register_instance(tmpl, inst);
721 	if (err) {
722 		crypto_drop_shash(&ctx->spawn);
723 out_free_inst:
724 		kfree(inst);
725 	}
726 
727 out_put_alg:
728 	crypto_mod_put(alg);
729 	return err;
730 }
731 
732 static int cryptd_aead_setkey(struct crypto_aead *parent,
733 			      const u8 *key, unsigned int keylen)
734 {
735 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
736 	struct crypto_aead *child = ctx->child;
737 
738 	return crypto_aead_setkey(child, key, keylen);
739 }
740 
741 static int cryptd_aead_setauthsize(struct crypto_aead *parent,
742 				   unsigned int authsize)
743 {
744 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
745 	struct crypto_aead *child = ctx->child;
746 
747 	return crypto_aead_setauthsize(child, authsize);
748 }
749 
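/*
 * Common AEAD completion path: perform the requested crypt operation on the
 * child transform, invoke the caller's completion handler, and drop the tfm
 * reference taken at enqueue time if one was taken.
 */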
750 static void cryptd_aead_crypt(struct aead_request *req,
751 			struct crypto_aead *child,
752 			int err,
753 			int (*crypt)(struct aead_request *req))
754 {
755 	struct cryptd_aead_request_ctx *rctx;
756 	struct cryptd_aead_ctx *ctx;
757 	crypto_completion_t compl;
758 	struct crypto_aead *tfm;
759 	int refcnt;
760 
761 	rctx = aead_request_ctx(req);
762 	compl = rctx->complete;
763 
764 	tfm = crypto_aead_reqtfm(req);
765 
766 	if (unlikely(err == -EINPROGRESS))
767 		goto out;
768 	aead_request_set_tfm(req, child);
769 	err = crypt(req);
770 
771 out:
772 	ctx = crypto_aead_ctx(tfm);
773 	refcnt = atomic_read(&ctx->refcnt);
774 
775 	local_bh_disable();
776 	compl(&req->base, err);
777 	local_bh_enable();
778 
779 	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
780 		crypto_free_aead(tfm);
781 }
782 
783 static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
784 {
785 	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
786 	struct crypto_aead *child = ctx->child;
787 	struct aead_request *req;
788 
789 	req = container_of(areq, struct aead_request, base);
790 	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
791 }
792 
793 static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
794 {
795 	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
796 	struct crypto_aead *child = ctx->child;
797 	struct aead_request *req;
798 
799 	req = container_of(areq, struct aead_request, base);
800 	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
801 }
802 
803 static int cryptd_aead_enqueue(struct aead_request *req,
804 				    crypto_completion_t compl)
805 {
806 	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
807 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
808 	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
809 
810 	rctx->complete = req->base.complete;
811 	req->base.complete = compl;
812 	return cryptd_enqueue_request(queue, &req->base);
813 }
814 
815 static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
816 {
817 	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
818 }
819 
820 static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
821 {
822 	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
823 }
824 
825 static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
826 {
827 	struct aead_instance *inst = aead_alg_instance(tfm);
828 	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
829 	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
830 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
831 	struct crypto_aead *cipher;
832 
833 	cipher = crypto_spawn_aead(spawn);
834 	if (IS_ERR(cipher))
835 		return PTR_ERR(cipher);
836 
837 	ctx->child = cipher;
838 	crypto_aead_set_reqsize(
839 		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
840 			 crypto_aead_reqsize(cipher)));
841 	return 0;
842 }
843 
844 static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
845 {
846 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
847 	crypto_free_aead(ctx->child);
848 }
849 
850 static int cryptd_create_aead(struct crypto_template *tmpl,
851 		              struct rtattr **tb,
852 			      struct cryptd_queue *queue)
853 {
854 	struct aead_instance_ctx *ctx;
855 	struct aead_instance *inst;
856 	struct aead_alg *alg;
857 	const char *name;
858 	u32 type = 0;
859 	u32 mask = CRYPTO_ALG_ASYNC;
860 	int err;
861 
862 	cryptd_check_internal(tb, &type, &mask);
863 
864 	name = crypto_attr_alg_name(tb[1]);
865 	if (IS_ERR(name))
866 		return PTR_ERR(name);
867 
868 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
869 	if (!inst)
870 		return -ENOMEM;
871 
872 	ctx = aead_instance_ctx(inst);
873 	ctx->queue = queue;
874 
875 	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
876 	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
877 	if (err)
878 		goto out_free_inst;
879 
880 	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
881 	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
882 	if (err)
883 		goto out_drop_aead;
884 
885 	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
886 				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
887 	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
888 
889 	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
890 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
891 
892 	inst->alg.init = cryptd_aead_init_tfm;
893 	inst->alg.exit = cryptd_aead_exit_tfm;
894 	inst->alg.setkey = cryptd_aead_setkey;
895 	inst->alg.setauthsize = cryptd_aead_setauthsize;
896 	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
897 	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
898 
899 	err = aead_register_instance(tmpl, inst);
900 	if (err) {
901 out_drop_aead:
902 		crypto_drop_aead(&ctx->aead_spawn);
903 out_free_inst:
904 		kfree(inst);
905 	}
906 	return err;
907 }
908 
909 static struct cryptd_queue queue;
910 
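/*
 * Template entry point: instantiating "cryptd(xyz)" dispatches on the type
 * of the wrapped algorithm (skcipher, hash or AEAD) and builds the matching
 * asynchronous instance on top of the shared per-CPU queue.
 */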
911 static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
912 {
913 	struct crypto_attr_type *algt;
914 
915 	algt = crypto_get_attr_type(tb);
916 	if (IS_ERR(algt))
917 		return PTR_ERR(algt);
918 
919 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
920 	case CRYPTO_ALG_TYPE_BLKCIPHER:
921 		return cryptd_create_skcipher(tmpl, tb, &queue);
922 	case CRYPTO_ALG_TYPE_DIGEST:
923 		return cryptd_create_hash(tmpl, tb, &queue);
924 	case CRYPTO_ALG_TYPE_AEAD:
925 		return cryptd_create_aead(tmpl, tb, &queue);
926 	}
927 
928 	return -EINVAL;
929 }
930 
931 static void cryptd_free(struct crypto_instance *inst)
932 {
933 	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
934 	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
935 	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
936 
937 	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
938 	case CRYPTO_ALG_TYPE_AHASH:
939 		crypto_drop_shash(&hctx->spawn);
940 		kfree(ahash_instance(inst));
941 		return;
942 	case CRYPTO_ALG_TYPE_AEAD:
943 		crypto_drop_aead(&aead_ctx->aead_spawn);
944 		kfree(aead_instance(inst));
945 		return;
946 	default:
947 		crypto_drop_spawn(&ctx->spawn);
948 		kfree(inst);
949 	}
950 }
951 
952 static struct crypto_template cryptd_tmpl = {
953 	.name = "cryptd",
954 	.create = cryptd_create,
955 	.free = cryptd_free,
956 	.module = THIS_MODULE,
957 };
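/*
 * Besides the cryptd_alloc_*() helpers below, the template can be
 * instantiated directly by name.  A minimal sketch (the algorithm name is
 * only an example):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("cryptd(sha256-generic)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */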
958 
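/**
 * cryptd_alloc_skcipher - allocate an asynchronous "cryptd(...)" skcipher
 * @alg_name: name of the skcipher algorithm to wrap
 * @type: algorithm type flags passed to crypto_alloc_skcipher()
 * @mask: algorithm mask flags passed to crypto_alloc_skcipher()
 *
 * Returns a cryptd-backed handle with its reference count set to one, or an
 * ERR_PTR().  The lookup is rejected (-EINVAL) if the resulting algorithm is
 * not provided by cryptd itself.
 */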
959 struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
960 					      u32 type, u32 mask)
961 {
962 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
963 	struct cryptd_skcipher_ctx *ctx;
964 	struct crypto_skcipher *tfm;
965 
966 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
967 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
968 		return ERR_PTR(-EINVAL);
969 
970 	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
971 	if (IS_ERR(tfm))
972 		return ERR_CAST(tfm);
973 
974 	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
975 		crypto_free_skcipher(tfm);
976 		return ERR_PTR(-EINVAL);
977 	}
978 
979 	ctx = crypto_skcipher_ctx(tfm);
980 	atomic_set(&ctx->refcnt, 1);
981 
982 	return container_of(tfm, struct cryptd_skcipher, base);
983 }
984 EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
985 
986 struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
987 {
988 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
989 
990 	return &ctx->child->base;
991 }
992 EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
993 
994 bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
995 {
996 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
997 
998 	return atomic_read(&ctx->refcnt) - 1;
999 }
1000 EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
1001 
1002 void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
1003 {
1004 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1005 
1006 	if (atomic_dec_and_test(&ctx->refcnt))
1007 		crypto_free_skcipher(&tfm->base);
1008 }
1009 EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
1010 
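/**
 * cryptd_alloc_ahash - allocate an asynchronous "cryptd(...)" ahash
 * @alg_name: name of the shash algorithm to wrap
 * @type: algorithm type flags passed to crypto_alloc_ahash()
 * @mask: algorithm mask flags passed to crypto_alloc_ahash()
 *
 * Returns a cryptd-backed handle with its reference count set to one, or an
 * ERR_PTR() on failure.
 */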
1011 struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
1012 					u32 type, u32 mask)
1013 {
1014 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1015 	struct cryptd_hash_ctx *ctx;
1016 	struct crypto_ahash *tfm;
1017 
1018 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1019 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1020 		return ERR_PTR(-EINVAL);
1021 	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
1022 	if (IS_ERR(tfm))
1023 		return ERR_CAST(tfm);
1024 	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1025 		crypto_free_ahash(tfm);
1026 		return ERR_PTR(-EINVAL);
1027 	}
1028 
1029 	ctx = crypto_ahash_ctx(tfm);
1030 	atomic_set(&ctx->refcnt, 1);
1031 
1032 	return __cryptd_ahash_cast(tfm);
1033 }
1034 EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
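/*
 * Illustrative sketch (not part of this file) of how a SIMD-accelerated
 * driver typically consumes the ahash helpers: wrap an internal-only shash
 * with cryptd, use the synchronous child while SIMD is usable and nothing
 * is queued, and fall back to the asynchronous cryptd path otherwise.
 * "__driver-hash", do_sync_hash() and do_async_hash() are placeholders.
 *
 *	struct cryptd_ahash *cryptd_tfm;
 *
 *	cryptd_tfm = cryptd_alloc_ahash("__driver-hash",
 *					CRYPTO_ALG_INTERNAL,
 *					CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(cryptd_tfm))
 *		return PTR_ERR(cryptd_tfm);
 *
 *	if (may_use_simd() && !cryptd_ahash_queued(cryptd_tfm))
 *		do_sync_hash(cryptd_ahash_child(cryptd_tfm));
 *	else
 *		do_async_hash(&cryptd_tfm->base);
 *
 *	cryptd_free_ahash(cryptd_tfm);
 */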
1035 
1036 struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
1037 {
1038 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1039 
1040 	return ctx->child;
1041 }
1042 EXPORT_SYMBOL_GPL(cryptd_ahash_child);
1043 
1044 struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
1045 {
1046 	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
1047 	return &rctx->desc;
1048 }
1049 EXPORT_SYMBOL_GPL(cryptd_shash_desc);
1050 
1051 bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1052 {
1053 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1054 
1055 	return atomic_read(&ctx->refcnt) - 1;
1056 }
1057 EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1058 
1059 void cryptd_free_ahash(struct cryptd_ahash *tfm)
1060 {
1061 	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1062 
1063 	if (atomic_dec_and_test(&ctx->refcnt))
1064 		crypto_free_ahash(&tfm->base);
1065 }
1066 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
1067 
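/**
 * cryptd_alloc_aead - allocate an asynchronous "cryptd(...)" AEAD
 * @alg_name: name of the AEAD algorithm to wrap
 * @type: algorithm type flags passed to crypto_alloc_aead()
 * @mask: algorithm mask flags passed to crypto_alloc_aead()
 *
 * Returns a cryptd-backed handle with its reference count set to one, or an
 * ERR_PTR() on failure.
 */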
1068 struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1069 						  u32 type, u32 mask)
1070 {
1071 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1072 	struct cryptd_aead_ctx *ctx;
1073 	struct crypto_aead *tfm;
1074 
1075 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1076 		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1077 		return ERR_PTR(-EINVAL);
1078 	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
1079 	if (IS_ERR(tfm))
1080 		return ERR_CAST(tfm);
1081 	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1082 		crypto_free_aead(tfm);
1083 		return ERR_PTR(-EINVAL);
1084 	}
1085 
1086 	ctx = crypto_aead_ctx(tfm);
1087 	atomic_set(&ctx->refcnt, 1);
1088 
1089 	return __cryptd_aead_cast(tfm);
1090 }
1091 EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
1092 
1093 struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
1094 {
1095 	struct cryptd_aead_ctx *ctx;
1096 	ctx = crypto_aead_ctx(&tfm->base);
1097 	return ctx->child;
1098 }
1099 EXPORT_SYMBOL_GPL(cryptd_aead_child);
1100 
1101 bool cryptd_aead_queued(struct cryptd_aead *tfm)
1102 {
1103 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1104 
1105 	return atomic_read(&ctx->refcnt) - 1;
1106 }
1107 EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1108 
1109 void cryptd_free_aead(struct cryptd_aead *tfm)
1110 {
1111 	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1112 
1113 	if (atomic_dec_and_test(&ctx->refcnt))
1114 		crypto_free_aead(&tfm->base);
1115 }
1116 EXPORT_SYMBOL_GPL(cryptd_free_aead);
1117 
1118 static int __init cryptd_init(void)
1119 {
1120 	int err;
1121 
1122 	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
1123 	if (err)
1124 		return err;
1125 
1126 	err = crypto_register_template(&cryptd_tmpl);
1127 	if (err)
1128 		cryptd_fini_queue(&queue);
1129 
1130 	return err;
1131 }
1132 
1133 static void __exit cryptd_exit(void)
1134 {
1135 	cryptd_fini_queue(&queue);
1136 	crypto_unregister_template(&cryptd_tmpl);
1137 }
1138 
1139 subsys_initcall(cryptd_init);
1140 module_exit(cryptd_exit);
1141 
1142 MODULE_LICENSE("GPL");
1143 MODULE_DESCRIPTION("Software async crypto daemon");
1144 MODULE_ALIAS_CRYPTO("cryptd");
1145