xref: /linux/crypto/cryptd.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * Software async crypto daemon.
4   *
5   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
6   *
7   * Added AEAD support to cryptd.
8   *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
9   *             Adrian Hoban <adrian.hoban@intel.com>
10   *             Gabriele Paoloni <gabriele.paoloni@intel.com>
11   *             Aidan O'Mahony (aidan.o.mahony@intel.com)
12   *    Copyright (c) 2010, Intel Corporation.
13   */
14  
15  #include <crypto/internal/hash.h>
16  #include <crypto/internal/aead.h>
17  #include <crypto/internal/skcipher.h>
18  #include <crypto/cryptd.h>
19  #include <linux/refcount.h>
20  #include <linux/err.h>
21  #include <linux/init.h>
22  #include <linux/kernel.h>
23  #include <linux/list.h>
24  #include <linux/module.h>
25  #include <linux/scatterlist.h>
26  #include <linux/sched.h>
27  #include <linux/slab.h>
28  #include <linux/workqueue.h>
29  
30  static unsigned int cryptd_max_cpu_qlen = 1000;
31  module_param(cryptd_max_cpu_qlen, uint, 0);
32  MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
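/*
 * A minimal sketch of how this parameter is typically set (assuming a normal
 * built-in configuration; the value 1500 is only illustrative): on the kernel
 * command line as "cryptd.cryptd_max_cpu_qlen=1500", or, when cryptd is built
 * as a module, via "modprobe cryptd cryptd_max_cpu_qlen=1500".
 */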
33  
34  static struct workqueue_struct *cryptd_wq;
35  
36  struct cryptd_cpu_queue {
37  	struct crypto_queue queue;
38  	struct work_struct work;
39  };
40  
41  struct cryptd_queue {
42  	/*
43  	 * Protected by disabling BH to allow enqueueing from softirq context
44  	 * and dequeuing from the kworker (cryptd_queue_worker()).
45  	 */
46  	struct cryptd_cpu_queue __percpu *cpu_queue;
47  };
48  
49  struct cryptd_instance_ctx {
50  	struct crypto_spawn spawn;
51  	struct cryptd_queue *queue;
52  };
53  
54  struct skcipherd_instance_ctx {
55  	struct crypto_skcipher_spawn spawn;
56  	struct cryptd_queue *queue;
57  };
58  
59  struct hashd_instance_ctx {
60  	struct crypto_shash_spawn spawn;
61  	struct cryptd_queue *queue;
62  };
63  
64  struct aead_instance_ctx {
65  	struct crypto_aead_spawn aead_spawn;
66  	struct cryptd_queue *queue;
67  };
68  
69  struct cryptd_skcipher_ctx {
70  	refcount_t refcnt;
71  	struct crypto_skcipher *child;
72  };
73  
74  struct cryptd_skcipher_request_ctx {
75  	struct skcipher_request req;
76  };
77  
78  struct cryptd_hash_ctx {
79  	refcount_t refcnt;
80  	struct crypto_shash *child;
81  };
82  
83  struct cryptd_hash_request_ctx {
84  	crypto_completion_t complete;
85  	void *data;
86  	struct shash_desc desc;
87  };
88  
89  struct cryptd_aead_ctx {
90  	refcount_t refcnt;
91  	struct crypto_aead *child;
92  };
93  
94  struct cryptd_aead_request_ctx {
95  	struct aead_request req;
96  };
97  
98  static void cryptd_queue_worker(struct work_struct *work);
99  
100  static int cryptd_init_queue(struct cryptd_queue *queue,
101  			     unsigned int max_cpu_qlen)
102  {
103  	int cpu;
104  	struct cryptd_cpu_queue *cpu_queue;
105  
106  	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
107  	if (!queue->cpu_queue)
108  		return -ENOMEM;
109  	for_each_possible_cpu(cpu) {
110  		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111  		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
112  		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
113  	}
114  	pr_info("cryptd: max_cpu_qlen set to %u\n", max_cpu_qlen);
115  	return 0;
116  }
117  
118  static void cryptd_fini_queue(struct cryptd_queue *queue)
119  {
120  	int cpu;
121  	struct cryptd_cpu_queue *cpu_queue;
122  
123  	for_each_possible_cpu(cpu) {
124  		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
125  		BUG_ON(cpu_queue->queue.qlen);
126  	}
127  	free_percpu(queue->cpu_queue);
128  }
129  
130  static int cryptd_enqueue_request(struct cryptd_queue *queue,
131  				  struct crypto_async_request *request)
132  {
133  	int err;
134  	struct cryptd_cpu_queue *cpu_queue;
135  	refcount_t *refcnt;
136  
137  	local_bh_disable();
138  	cpu_queue = this_cpu_ptr(queue->cpu_queue);
139  	err = crypto_enqueue_request(&cpu_queue->queue, request);
140  
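	/*
	 * refcount_t is the first member of all three cryptd context structs,
	 * so the tfm context can be treated as a bare refcount here; it is
	 * only armed (set to 1) for tfms obtained through the
	 * cryptd_alloc_*() helpers below, and stays zero otherwise.
	 */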
141  	refcnt = crypto_tfm_ctx(request->tfm);
142  
143  	if (err == -ENOSPC)
144  		goto out;
145  
146  	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
147  
148  	if (!refcount_read(refcnt))
149  		goto out;
150  
151  	refcount_inc(refcnt);
152  
153  out:
154  	local_bh_enable();
155  
156  	return err;
157  }
158  
159  /* Called in workqueue context: do one real crypto operation (via
160   * req->complete) and reschedule itself if there is more work to
161   * do. */
162  static void cryptd_queue_worker(struct work_struct *work)
163  {
164  	struct cryptd_cpu_queue *cpu_queue;
165  	struct crypto_async_request *req, *backlog;
166  
167  	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
168  	/*
169  	 * Only handle one request at a time to avoid hogging crypto workqueue.
170  	 */
171  	local_bh_disable();
172  	backlog = crypto_get_backlog(&cpu_queue->queue);
173  	req = crypto_dequeue_request(&cpu_queue->queue);
174  	local_bh_enable();
175  
176  	if (!req)
177  		return;
178  
179  	if (backlog)
180  		crypto_request_complete(backlog, -EINPROGRESS);
181  	crypto_request_complete(req, 0);
182  
183  	if (cpu_queue->queue.qlen)
184  		queue_work(cryptd_wq, &cpu_queue->work);
185  }
186  
187  static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
188  {
189  	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
190  	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
191  	return ictx->queue;
192  }
193  
194  static void cryptd_type_and_mask(struct crypto_attr_type *algt,
195  				 u32 *type, u32 *mask)
196  {
197  	/*
198  	 * cryptd is allowed to wrap internal algorithms, but in that case the
199  	 * resulting cryptd instance will be marked as internal as well.
200  	 */
201  	*type = algt->type & CRYPTO_ALG_INTERNAL;
202  	*mask = algt->mask & CRYPTO_ALG_INTERNAL;
203  
204  	/* No point in cryptd wrapping an algorithm that's already async. */
205  	*mask |= CRYPTO_ALG_ASYNC;
206  
207  	*mask |= crypto_algt_inherited_mask(algt);
208  }
209  
210  static int cryptd_init_instance(struct crypto_instance *inst,
211  				struct crypto_alg *alg)
212  {
213  	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
214  		     "cryptd(%s)",
215  		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
216  		return -ENAMETOOLONG;
217  
218  	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
219  
220  	inst->alg.cra_priority = alg->cra_priority + 50;
221  	inst->alg.cra_blocksize = alg->cra_blocksize;
222  	inst->alg.cra_alignmask = alg->cra_alignmask;
223  
224  	return 0;
225  }
226  
227  static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
228  				  const u8 *key, unsigned int keylen)
229  {
230  	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
231  	struct crypto_skcipher *child = ctx->child;
232  
233  	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
234  	crypto_skcipher_set_flags(child,
235  				  crypto_skcipher_get_flags(parent) &
236  				  CRYPTO_TFM_REQ_MASK);
237  	return crypto_skcipher_setkey(child, key, keylen);
238  }
239  
240  static struct skcipher_request *cryptd_skcipher_prepare(
241  	struct skcipher_request *req, int err)
242  {
243  	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
244  	struct skcipher_request *subreq = &rctx->req;
245  	struct cryptd_skcipher_ctx *ctx;
246  	struct crypto_skcipher *child;
247  
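	/*
	 * Restore the caller's completion and data: cryptd_skcipher_enqueue()
	 * parked them in the subrequest while the request sat on the queue.
	 */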
248  	req->base.complete = subreq->base.complete;
249  	req->base.data = subreq->base.data;
250  
251  	if (unlikely(err == -EINPROGRESS))
252  		return NULL;
253  
254  	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
255  	child = ctx->child;
256  
257  	skcipher_request_set_tfm(subreq, child);
258  	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
259  				      NULL, NULL);
260  	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
261  				   req->iv);
262  
263  	return subreq;
264  }
265  
266  static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
267  				     crypto_completion_t complete)
268  {
269  	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
270  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
271  	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
272  	struct skcipher_request *subreq = &rctx->req;
273  	int refcnt = refcount_read(&ctx->refcnt);
274  
275  	local_bh_disable();
276  	skcipher_request_complete(req, err);
277  	local_bh_enable();
278  
279  	if (unlikely(err == -EINPROGRESS)) {
280  		subreq->base.complete = req->base.complete;
281  		subreq->base.data = req->base.data;
282  		req->base.complete = complete;
283  		req->base.data = req;
284  	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
285  		crypto_free_skcipher(tfm);
286  }
287  
288  static void cryptd_skcipher_encrypt(void *data, int err)
289  {
290  	struct skcipher_request *req = data;
291  	struct skcipher_request *subreq;
292  
293  	subreq = cryptd_skcipher_prepare(req, err);
294  	if (likely(subreq))
295  		err = crypto_skcipher_encrypt(subreq);
296  
297  	cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
298  }
299  
300  static void cryptd_skcipher_decrypt(void *data, int err)
301  {
302  	struct skcipher_request *req = data;
303  	struct skcipher_request *subreq;
304  
305  	subreq = cryptd_skcipher_prepare(req, err);
306  	if (likely(subreq))
307  		err = crypto_skcipher_decrypt(subreq);
308  
309  	cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
310  }
311  
312  static int cryptd_skcipher_enqueue(struct skcipher_request *req,
313  				   crypto_completion_t compl)
314  {
315  	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
316  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
317  	struct skcipher_request *subreq = &rctx->req;
318  	struct cryptd_queue *queue;
319  
320  	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
321  	subreq->base.complete = req->base.complete;
322  	subreq->base.data = req->base.data;
323  	req->base.complete = compl;
324  	req->base.data = req;
325  
326  	return cryptd_enqueue_request(queue, &req->base);
327  }
328  
329  static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
330  {
331  	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
332  }
333  
334  static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
335  {
336  	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
337  }
338  
339  static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
340  {
341  	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
342  	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
343  	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
344  	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
345  	struct crypto_skcipher *cipher;
346  
347  	cipher = crypto_spawn_skcipher(spawn);
348  	if (IS_ERR(cipher))
349  		return PTR_ERR(cipher);
350  
351  	ctx->child = cipher;
352  	crypto_skcipher_set_reqsize(
353  		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
354  		     crypto_skcipher_reqsize(cipher));
355  	return 0;
356  }
357  
358  static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
359  {
360  	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
361  
362  	crypto_free_skcipher(ctx->child);
363  }
364  
365  static void cryptd_skcipher_free(struct skcipher_instance *inst)
366  {
367  	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
368  
369  	crypto_drop_skcipher(&ctx->spawn);
370  	kfree(inst);
371  }
372  
373  static int cryptd_create_skcipher(struct crypto_template *tmpl,
374  				  struct rtattr **tb,
375  				  struct crypto_attr_type *algt,
376  				  struct cryptd_queue *queue)
377  {
378  	struct skcipherd_instance_ctx *ctx;
379  	struct skcipher_instance *inst;
380  	struct skcipher_alg_common *alg;
381  	u32 type;
382  	u32 mask;
383  	int err;
384  
385  	cryptd_type_and_mask(algt, &type, &mask);
386  
387  	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
388  	if (!inst)
389  		return -ENOMEM;
390  
391  	ctx = skcipher_instance_ctx(inst);
392  	ctx->queue = queue;
393  
394  	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
395  				   crypto_attr_alg_name(tb[1]), type, mask);
396  	if (err)
397  		goto err_free_inst;
398  
399  	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
400  	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
401  	if (err)
402  		goto err_free_inst;
403  
404  	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
405  		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
406  	inst->alg.ivsize = alg->ivsize;
407  	inst->alg.chunksize = alg->chunksize;
408  	inst->alg.min_keysize = alg->min_keysize;
409  	inst->alg.max_keysize = alg->max_keysize;
410  
411  	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
412  
413  	inst->alg.init = cryptd_skcipher_init_tfm;
414  	inst->alg.exit = cryptd_skcipher_exit_tfm;
415  
416  	inst->alg.setkey = cryptd_skcipher_setkey;
417  	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
418  	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
419  
420  	inst->free = cryptd_skcipher_free;
421  
422  	err = skcipher_register_instance(tmpl, inst);
423  	if (err) {
424  err_free_inst:
425  		cryptd_skcipher_free(inst);
426  	}
427  	return err;
428  }
429  
430  static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
431  {
432  	struct ahash_instance *inst = ahash_alg_instance(tfm);
433  	struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
434  	struct crypto_shash_spawn *spawn = &ictx->spawn;
435  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
436  	struct crypto_shash *hash;
437  
438  	hash = crypto_spawn_shash(spawn);
439  	if (IS_ERR(hash))
440  		return PTR_ERR(hash);
441  
442  	ctx->child = hash;
443  	crypto_ahash_set_reqsize(tfm,
444  				 sizeof(struct cryptd_hash_request_ctx) +
445  				 crypto_shash_descsize(hash));
446  	return 0;
447  }
448  
449  static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
450  				 struct crypto_ahash *tfm)
451  {
452  	struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
453  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
454  	struct crypto_shash *hash;
455  
456  	hash = crypto_clone_shash(ctx->child);
457  	if (IS_ERR(hash))
458  		return PTR_ERR(hash);
459  
460  	nctx->child = hash;
461  	return 0;
462  }
463  
464  static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
465  {
466  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
467  
468  	crypto_free_shash(ctx->child);
469  }
470  
471  static int cryptd_hash_setkey(struct crypto_ahash *parent,
472  				   const u8 *key, unsigned int keylen)
473  {
474  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
475  	struct crypto_shash *child = ctx->child;
476  
477  	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
478  	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
479  				      CRYPTO_TFM_REQ_MASK);
480  	return crypto_shash_setkey(child, key, keylen);
481  }
482  
483  static int cryptd_hash_enqueue(struct ahash_request *req,
484  				crypto_completion_t compl)
485  {
486  	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
487  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
488  	struct cryptd_queue *queue =
489  		cryptd_get_queue(crypto_ahash_tfm(tfm));
490  
491  	rctx->complete = req->base.complete;
492  	rctx->data = req->base.data;
493  	req->base.complete = compl;
494  	req->base.data = req;
495  
496  	return cryptd_enqueue_request(queue, &req->base);
497  }
498  
499  static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
500  					      int err)
501  {
502  	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
503  
504  	req->base.complete = rctx->complete;
505  	req->base.data = rctx->data;
506  
507  	if (unlikely(err == -EINPROGRESS))
508  		return NULL;
509  
510  	return &rctx->desc;
511  }
512  
513  static void cryptd_hash_complete(struct ahash_request *req, int err,
514  				 crypto_completion_t complete)
515  {
516  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
517  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
518  	int refcnt = refcount_read(&ctx->refcnt);
519  
520  	local_bh_disable();
521  	ahash_request_complete(req, err);
522  	local_bh_enable();
523  
524  	if (err == -EINPROGRESS) {
525  		req->base.complete = complete;
526  		req->base.data = req;
527  	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
528  		crypto_free_ahash(tfm);
529  }
530  
531  static void cryptd_hash_init(void *data, int err)
532  {
533  	struct ahash_request *req = data;
534  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
535  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
536  	struct crypto_shash *child = ctx->child;
537  	struct shash_desc *desc;
538  
539  	desc = cryptd_hash_prepare(req, err);
540  	if (unlikely(!desc))
541  		goto out;
542  
543  	desc->tfm = child;
544  
545  	err = crypto_shash_init(desc);
546  
547  out:
548  	cryptd_hash_complete(req, err, cryptd_hash_init);
549  }
550  
551  static int cryptd_hash_init_enqueue(struct ahash_request *req)
552  {
553  	return cryptd_hash_enqueue(req, cryptd_hash_init);
554  }
555  
556  static void cryptd_hash_update(void *data, int err)
557  {
558  	struct ahash_request *req = data;
559  	struct shash_desc *desc;
560  
561  	desc = cryptd_hash_prepare(req, err);
562  	if (likely(desc))
563  		err = shash_ahash_update(req, desc);
564  
565  	cryptd_hash_complete(req, err, cryptd_hash_update);
566  }
567  
568  static int cryptd_hash_update_enqueue(struct ahash_request *req)
569  {
570  	return cryptd_hash_enqueue(req, cryptd_hash_update);
571  }
572  
573  static void cryptd_hash_final(void *data, int err)
574  {
575  	struct ahash_request *req = data;
576  	struct shash_desc *desc;
577  
578  	desc = cryptd_hash_prepare(req, err);
579  	if (likely(desc))
580  		err = crypto_shash_final(desc, req->result);
581  
582  	cryptd_hash_complete(req, err, cryptd_hash_final);
583  }
584  
585  static int cryptd_hash_final_enqueue(struct ahash_request *req)
586  {
587  	return cryptd_hash_enqueue(req, cryptd_hash_final);
588  }
589  
590  static void cryptd_hash_finup(void *data, int err)
591  {
592  	struct ahash_request *req = data;
593  	struct shash_desc *desc;
594  
595  	desc = cryptd_hash_prepare(req, err);
596  	if (likely(desc))
597  		err = shash_ahash_finup(req, desc);
598  
599  	cryptd_hash_complete(req, err, cryptd_hash_finup);
600  }
601  
602  static int cryptd_hash_finup_enqueue(struct ahash_request *req)
603  {
604  	return cryptd_hash_enqueue(req, cryptd_hash_finup);
605  }
606  
607  static void cryptd_hash_digest(void *data, int err)
608  {
609  	struct ahash_request *req = data;
610  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
611  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
612  	struct crypto_shash *child = ctx->child;
613  	struct shash_desc *desc;
614  
615  	desc = cryptd_hash_prepare(req, err);
616  	if (unlikely(!desc))
617  		goto out;
618  
619  	desc->tfm = child;
620  
621  	err = shash_ahash_digest(req, desc);
622  
623  out:
624  	cryptd_hash_complete(req, err, cryptd_hash_digest);
625  }
626  
627  static int cryptd_hash_digest_enqueue(struct ahash_request *req)
628  {
629  	return cryptd_hash_enqueue(req, cryptd_hash_digest);
630  }
631  
632  static int cryptd_hash_export(struct ahash_request *req, void *out)
633  {
634  	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
635  
636  	return crypto_shash_export(&rctx->desc, out);
637  }
638  
639  static int cryptd_hash_import(struct ahash_request *req, const void *in)
640  {
641  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
642  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
643  	struct shash_desc *desc = cryptd_shash_desc(req);
644  
645  	desc->tfm = ctx->child;
646  
647  	return crypto_shash_import(desc, in);
648  }
649  
650  static void cryptd_hash_free(struct ahash_instance *inst)
651  {
652  	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
653  
654  	crypto_drop_shash(&ctx->spawn);
655  	kfree(inst);
656  }
657  
658  static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
659  			      struct crypto_attr_type *algt,
660  			      struct cryptd_queue *queue)
661  {
662  	struct hashd_instance_ctx *ctx;
663  	struct ahash_instance *inst;
664  	struct shash_alg *alg;
665  	u32 type;
666  	u32 mask;
667  	int err;
668  
669  	cryptd_type_and_mask(algt, &type, &mask);
670  
671  	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
672  	if (!inst)
673  		return -ENOMEM;
674  
675  	ctx = ahash_instance_ctx(inst);
676  	ctx->queue = queue;
677  
678  	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
679  				crypto_attr_alg_name(tb[1]), type, mask);
680  	if (err)
681  		goto err_free_inst;
682  	alg = crypto_spawn_shash_alg(&ctx->spawn);
683  
684  	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
685  	if (err)
686  		goto err_free_inst;
687  
688  	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
689  		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
690  					CRYPTO_ALG_OPTIONAL_KEY));
691  	inst->alg.halg.digestsize = alg->digestsize;
692  	inst->alg.halg.statesize = alg->statesize;
693  	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
694  
695  	inst->alg.init_tfm = cryptd_hash_init_tfm;
696  	inst->alg.clone_tfm = cryptd_hash_clone_tfm;
697  	inst->alg.exit_tfm = cryptd_hash_exit_tfm;
698  
699  	inst->alg.init   = cryptd_hash_init_enqueue;
700  	inst->alg.update = cryptd_hash_update_enqueue;
701  	inst->alg.final  = cryptd_hash_final_enqueue;
702  	inst->alg.finup  = cryptd_hash_finup_enqueue;
703  	inst->alg.export = cryptd_hash_export;
704  	inst->alg.import = cryptd_hash_import;
705  	if (crypto_shash_alg_has_setkey(alg))
706  		inst->alg.setkey = cryptd_hash_setkey;
707  	inst->alg.digest = cryptd_hash_digest_enqueue;
708  
709  	inst->free = cryptd_hash_free;
710  
711  	err = ahash_register_instance(tmpl, inst);
712  	if (err) {
713  err_free_inst:
714  		cryptd_hash_free(inst);
715  	}
716  	return err;
717  }
718  
719  static int cryptd_aead_setkey(struct crypto_aead *parent,
720  			      const u8 *key, unsigned int keylen)
721  {
722  	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
723  	struct crypto_aead *child = ctx->child;
724  
725  	return crypto_aead_setkey(child, key, keylen);
726  }
727  
728  static int cryptd_aead_setauthsize(struct crypto_aead *parent,
729  				   unsigned int authsize)
730  {
731  	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
732  	struct crypto_aead *child = ctx->child;
733  
734  	return crypto_aead_setauthsize(child, authsize);
735  }
736  
737  static void cryptd_aead_crypt(struct aead_request *req,
738  			      struct crypto_aead *child, int err,
739  			      int (*crypt)(struct aead_request *req),
740  			      crypto_completion_t compl)
741  {
742  	struct cryptd_aead_request_ctx *rctx;
743  	struct aead_request *subreq;
744  	struct cryptd_aead_ctx *ctx;
745  	struct crypto_aead *tfm;
746  	int refcnt;
747  
748  	rctx = aead_request_ctx(req);
749  	subreq = &rctx->req;
750  	req->base.complete = subreq->base.complete;
751  	req->base.data = subreq->base.data;
752  
753  	tfm = crypto_aead_reqtfm(req);
754  
755  	if (unlikely(err == -EINPROGRESS))
756  		goto out;
757  
758  	aead_request_set_tfm(subreq, child);
759  	aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
760  				  NULL, NULL);
761  	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
762  			       req->iv);
763  	aead_request_set_ad(subreq, req->assoclen);
764  
765  	err = crypt(subreq);
766  
767  out:
768  	ctx = crypto_aead_ctx(tfm);
769  	refcnt = refcount_read(&ctx->refcnt);
770  
771  	local_bh_disable();
772  	aead_request_complete(req, err);
773  	local_bh_enable();
774  
775  	if (err == -EINPROGRESS) {
776  		subreq->base.complete = req->base.complete;
777  		subreq->base.data = req->base.data;
778  		req->base.complete = compl;
779  		req->base.data = req;
780  	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
781  		crypto_free_aead(tfm);
782  }
783  
784  static void cryptd_aead_encrypt(void *data, int err)
785  {
786  	struct aead_request *req = data;
787  	struct cryptd_aead_ctx *ctx;
788  	struct crypto_aead *child;
789  
790  	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
791  	child = ctx->child;
792  	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
793  			  cryptd_aead_encrypt);
794  }
795  
796  static void cryptd_aead_decrypt(void *data, int err)
797  {
798  	struct aead_request *req = data;
799  	struct cryptd_aead_ctx *ctx;
800  	struct crypto_aead *child;
801  
802  	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
803  	child = ctx->child;
804  	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
805  			  cryptd_aead_decrypt);
806  }
807  
808  static int cryptd_aead_enqueue(struct aead_request *req,
809  				    crypto_completion_t compl)
810  {
811  	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
812  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
813  	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
814  	struct aead_request *subreq = &rctx->req;
815  
816  	subreq->base.complete = req->base.complete;
817  	subreq->base.data = req->base.data;
818  	req->base.complete = compl;
819  	req->base.data = req;
820  	return cryptd_enqueue_request(queue, &req->base);
821  }
822  
823  static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
824  {
825  	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
826  }
827  
828  static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
829  {
830  	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
831  }
832  
833  static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
834  {
835  	struct aead_instance *inst = aead_alg_instance(tfm);
836  	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
837  	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
838  	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
839  	struct crypto_aead *cipher;
840  
841  	cipher = crypto_spawn_aead(spawn);
842  	if (IS_ERR(cipher))
843  		return PTR_ERR(cipher);
844  
845  	ctx->child = cipher;
846  	crypto_aead_set_reqsize(
847  		tfm, sizeof(struct cryptd_aead_request_ctx) +
848  		     crypto_aead_reqsize(cipher));
849  	return 0;
850  }
851  
852  static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
853  {
854  	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
855  	crypto_free_aead(ctx->child);
856  }
857  
858  static void cryptd_aead_free(struct aead_instance *inst)
859  {
860  	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);
861  
862  	crypto_drop_aead(&ctx->aead_spawn);
863  	kfree(inst);
864  }
865  
866  static int cryptd_create_aead(struct crypto_template *tmpl,
867  		              struct rtattr **tb,
868  			      struct crypto_attr_type *algt,
869  			      struct cryptd_queue *queue)
870  {
871  	struct aead_instance_ctx *ctx;
872  	struct aead_instance *inst;
873  	struct aead_alg *alg;
874  	u32 type;
875  	u32 mask;
876  	int err;
877  
878  	cryptd_type_and_mask(algt, &type, &mask);
879  
880  	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
881  	if (!inst)
882  		return -ENOMEM;
883  
884  	ctx = aead_instance_ctx(inst);
885  	ctx->queue = queue;
886  
887  	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
888  			       crypto_attr_alg_name(tb[1]), type, mask);
889  	if (err)
890  		goto err_free_inst;
891  
892  	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
893  	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
894  	if (err)
895  		goto err_free_inst;
896  
897  	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
898  		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
899  	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
900  
901  	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
902  	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
903  
904  	inst->alg.init = cryptd_aead_init_tfm;
905  	inst->alg.exit = cryptd_aead_exit_tfm;
906  	inst->alg.setkey = cryptd_aead_setkey;
907  	inst->alg.setauthsize = cryptd_aead_setauthsize;
908  	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
909  	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
910  
911  	inst->free = cryptd_aead_free;
912  
913  	err = aead_register_instance(tmpl, inst);
914  	if (err) {
915  err_free_inst:
916  		cryptd_aead_free(inst);
917  	}
918  	return err;
919  }
920  
921  static struct cryptd_queue queue;
922  
923  static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
924  {
925  	struct crypto_attr_type *algt;
926  
927  	algt = crypto_get_attr_type(tb);
928  	if (IS_ERR(algt))
929  		return PTR_ERR(algt);
930  
931  	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
932  	case CRYPTO_ALG_TYPE_LSKCIPHER:
933  		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
934  	case CRYPTO_ALG_TYPE_HASH:
935  		return cryptd_create_hash(tmpl, tb, algt, &queue);
936  	case CRYPTO_ALG_TYPE_AEAD:
937  		return cryptd_create_aead(tmpl, tb, algt, &queue);
938  	}
939  
940  	return -EINVAL;
941  }
942  
943  static struct crypto_template cryptd_tmpl = {
944  	.name = "cryptd",
945  	.create = cryptd_create,
946  	.module = THIS_MODULE,
947  };
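/*
 * The "cryptd" template can also be instantiated by name from any crypto API
 * user; a minimal sketch (the algorithm name is only illustrative):
 *
 *	tfm = crypto_alloc_ahash("cryptd(sha256-generic)", 0, 0);
 *
 * goes through cryptd_create() above. The cryptd_alloc_*() helpers below do
 * the same "cryptd(%s)" wrapping but also arm the context refcount so the
 * tfm is not freed while requests are still queued.
 */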
948  
949  struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
950  					      u32 type, u32 mask)
951  {
952  	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
953  	struct cryptd_skcipher_ctx *ctx;
954  	struct crypto_skcipher *tfm;
955  
956  	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
957  		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
958  		return ERR_PTR(-EINVAL);
959  
960  	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
961  	if (IS_ERR(tfm))
962  		return ERR_CAST(tfm);
963  
964  	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
965  		crypto_free_skcipher(tfm);
966  		return ERR_PTR(-EINVAL);
967  	}
968  
969  	ctx = crypto_skcipher_ctx(tfm);
970  	refcount_set(&ctx->refcnt, 1);
971  
972  	return container_of(tfm, struct cryptd_skcipher, base);
973  }
974  EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
975  
976  struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
977  {
978  	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
979  
980  	return ctx->child;
981  }
982  EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
983  
984  bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
985  {
986  	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
987  
988  	return refcount_read(&ctx->refcnt) - 1;
989  }
990  EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
991  
992  void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
993  {
994  	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
995  
996  	if (refcount_dec_and_test(&ctx->refcnt))
997  		crypto_free_skcipher(&tfm->base);
998  }
999  EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
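/*
 * Minimal usage sketch for the skcipher helpers above, assuming a
 * hypothetical internal-only implementation called "__cbc-aes-example"
 * (the name and error handling are illustrative, not taken from a real
 * driver):
 *
 *	struct cryptd_skcipher *ctfm;
 *	int err;
 *
 *	ctfm = cryptd_alloc_skcipher("__cbc-aes-example",
 *				     CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	err = crypto_skcipher_setkey(cryptd_skcipher_child(ctfm), key, keylen);
 *	...
 *	cryptd_free_skcipher(ctfm);
 */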
1000  
1001  struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
1002  					u32 type, u32 mask)
1003  {
1004  	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1005  	struct cryptd_hash_ctx *ctx;
1006  	struct crypto_ahash *tfm;
1007  
1008  	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1009  		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1010  		return ERR_PTR(-EINVAL);
1011  	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
1012  	if (IS_ERR(tfm))
1013  		return ERR_CAST(tfm);
1014  	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1015  		crypto_free_ahash(tfm);
1016  		return ERR_PTR(-EINVAL);
1017  	}
1018  
1019  	ctx = crypto_ahash_ctx(tfm);
1020  	refcount_set(&ctx->refcnt, 1);
1021  
1022  	return __cryptd_ahash_cast(tfm);
1023  }
1024  EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
1025  
1026  struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
1027  {
1028  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1029  
1030  	return ctx->child;
1031  }
1032  EXPORT_SYMBOL_GPL(cryptd_ahash_child);
1033  
1034  struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
1035  {
1036  	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
1037  	return &rctx->desc;
1038  }
1039  EXPORT_SYMBOL_GPL(cryptd_shash_desc);
1040  
1041  bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1042  {
1043  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1044  
1045  	return refcount_read(&ctx->refcnt) - 1;
1046  }
1047  EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1048  
1049  void cryptd_free_ahash(struct cryptd_ahash *tfm)
1050  {
1051  	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1052  
1053  	if (refcount_dec_and_test(&ctx->refcnt))
1054  		crypto_free_ahash(&tfm->base);
1055  }
1056  EXPORT_SYMBOL_GPL(cryptd_free_ahash);
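/*
 * Minimal usage sketch for the ahash helpers above (the algorithm name is
 * only illustrative): allocate a cryptd-wrapped hash, reach the synchronous
 * shash child where direct use is safe, and drop the reference when done.
 *
 *	struct cryptd_ahash *chash;
 *	struct crypto_shash *child;
 *
 *	chash = cryptd_alloc_ahash("sha256-generic", 0, 0);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *
 *	child = cryptd_ahash_child(chash);
 *	... hash either via &chash->base (deferred to the workqueue) or via
 *	    the child shash, as the calling context allows ...
 *	cryptd_free_ahash(chash);
 */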
1057  
1058  struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1059  						  u32 type, u32 mask)
1060  {
1061  	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1062  	struct cryptd_aead_ctx *ctx;
1063  	struct crypto_aead *tfm;
1064  
1065  	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1066  		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1067  		return ERR_PTR(-EINVAL);
1068  	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
1069  	if (IS_ERR(tfm))
1070  		return ERR_CAST(tfm);
1071  	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1072  		crypto_free_aead(tfm);
1073  		return ERR_PTR(-EINVAL);
1074  	}
1075  
1076  	ctx = crypto_aead_ctx(tfm);
1077  	refcount_set(&ctx->refcnt, 1);
1078  
1079  	return __cryptd_aead_cast(tfm);
1080  }
1081  EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
1082  
1083  struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
1084  {
1085  	struct cryptd_aead_ctx *ctx;
1086  	ctx = crypto_aead_ctx(&tfm->base);
1087  	return ctx->child;
1088  }
1089  EXPORT_SYMBOL_GPL(cryptd_aead_child);
1090  
1091  bool cryptd_aead_queued(struct cryptd_aead *tfm)
1092  {
1093  	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1094  
1095  	return refcount_read(&ctx->refcnt) - 1;
1096  }
1097  EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1098  
1099  void cryptd_free_aead(struct cryptd_aead *tfm)
1100  {
1101  	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1102  
1103  	if (refcount_dec_and_test(&ctx->refcnt))
1104  		crypto_free_aead(&tfm->base);
1105  }
1106  EXPORT_SYMBOL_GPL(cryptd_free_aead);
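/*
 * Minimal usage sketch for the AEAD helpers above, assuming a hypothetical
 * internal-only implementation called "__gcm-aes-example" (name and flow are
 * illustrative): keys are set on the cryptd tfm itself and forwarded to the
 * child by cryptd_aead_setkey().
 *
 *	struct cryptd_aead *caead;
 *	int err;
 *
 *	caead = cryptd_alloc_aead("__gcm-aes-example", CRYPTO_ALG_INTERNAL,
 *				  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(caead))
 *		return PTR_ERR(caead);
 *
 *	err = crypto_aead_setkey(&caead->base, key, keylen);
 *	...
 *	cryptd_free_aead(caead);
 */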
1107  
1108  static int __init cryptd_init(void)
1109  {
1110  	int err;
1111  
1112  	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
1113  				    1);
1114  	if (!cryptd_wq)
1115  		return -ENOMEM;
1116  
1117  	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
1118  	if (err)
1119  		goto err_destroy_wq;
1120  
1121  	err = crypto_register_template(&cryptd_tmpl);
1122  	if (err)
1123  		goto err_fini_queue;
1124  
1125  	return 0;
1126  
1127  err_fini_queue:
1128  	cryptd_fini_queue(&queue);
1129  err_destroy_wq:
1130  	destroy_workqueue(cryptd_wq);
1131  	return err;
1132  }
1133  
1134  static void __exit cryptd_exit(void)
1135  {
1136  	destroy_workqueue(cryptd_wq);
1137  	cryptd_fini_queue(&queue);
1138  	crypto_unregister_template(&cryptd_tmpl);
1139  }
1140  
1141  subsys_initcall(cryptd_init);
1142  module_exit(cryptd_exit);
1143  
1144  MODULE_LICENSE("GPL");
1145  MODULE_DESCRIPTION("Software async crypto daemon");
1146  MODULE_ALIAS_CRYPTO("cryptd");
1147