// SPDX-License-Identifier: GPL-2.0-only
/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 */

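/*
 * Usage sketch (illustrative only, not part of the original file): pcrypt
 * wraps an AEAD algorithm and spreads requests over CPUs via padata. A
 * kernel user could, for example, instantiate the wrapper on demand with:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * "gcm(aes)" is just an example of a wrappable AEAD; the instance is
 * built by the "pcrypt" template registered at the bottom of this file.
 */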
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be
	 * equal to the serial cpumask of the corresponding padata instance,
	 * so it is updated when padata notifies us about a serial
	 * cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on the kernel configuration (particularly on
	 * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration,
	 * cpumask_var_t may be either a pointer to the struct cpumask
	 * or a variable allocated on the stack. Thus we cannot safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask, which makes it possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};
46 
47 static struct padata_pcrypt pencrypt;
48 static struct padata_pcrypt pdecrypt;
49 static struct kset           *pcrypt_kset;
50 
51 struct pcrypt_instance_ctx {
52 	struct crypto_aead_spawn spawn;
53 	atomic_t tfm_count;
54 };
55 
56 struct pcrypt_aead_ctx {
57 	struct crypto_aead *child;
58 	unsigned int cb_cpu;
59 };
60 
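/*
 * Pick the callback CPU for a request and hand the request to padata.
 * If *cb_cpu is still in the current callback cpumask it is kept;
 * otherwise a CPU is chosen from the mask round-robin, based on the old
 * value, and remembered in *cb_cpu. An empty mask leaves *cb_cpu
 * untouched. The mask is read under the BH-disabling RCU read lock
 * because the cpumask notifier below may replace it concurrently.
 */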
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

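/* Key and authsize handling is simply delegated to the child AEAD. */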
static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

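/*
 * Serial callback: padata invokes this in the original submission
 * order; it completes the caller's request with the outcome stored in
 * padata->info.
 */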
static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

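/*
 * Completion callback of the child request, used when the child cipher
 * finishes asynchronously: record the error and queue the request for
 * serialization. Clearing CRYPTO_TFM_REQ_MAY_SLEEP on the original
 * request presumably reflects that the serial completion may run in a
 * context where sleeping is not allowed.
 */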
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}

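/*
 * Parallel worker for encryption: runs the child encrypt on the padata
 * parallel CPU. -EINPROGRESS means pcrypt_aead_done() will serialize
 * the request later; any other result is serialized right away.
 */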
static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

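/*
 * Encrypt entry point: set up the child request inside the pcrypt
 * request context and dispatch it to the pencrypt padata instance.
 * CRYPTO_TFM_REQ_MAY_SLEEP is masked out of the child's callback flags,
 * presumably because the child request may execute in atomic (softirq)
 * context via padata. Returning -EINPROGRESS tells the caller that
 * completion is asynchronous.
 */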
static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

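/* The decryption path mirrors encryption, but dispatches to pdecrypt. */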
static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

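/*
 * Transform initialization: distribute callback CPUs over the online
 * CPUs round-robin via tfm_count, and size the request context so that
 * the pcrypt request, the child request and the child's own request
 * context are carved out of a single allocation.
 */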
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void pcrypt_free(struct aead_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	kfree(inst);
}

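/*
 * Common instance setup: the driver name becomes "pcrypt(<child>)" and
 * the priority is raised by 100 so that, once instantiated, the
 * parallel wrapper wins algorithm lookups over the plain child.
 */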
static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

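/*
 * Build a pcrypt AEAD instance around the algorithm named by the first
 * template parameter. The result is flagged CRYPTO_ALG_ASYNC since
 * requests always complete asynchronously through padata.
 */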
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
{
	struct pcrypt_instance_ctx *ctx;
	struct crypto_attr_type *algt;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	inst->free = pcrypt_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto out_drop_aead;

out:
	return err;

out_drop_aead:
	crypto_drop_aead(&ctx->spawn);
out_free_inst:
	kfree(inst);
	goto out;
}

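/* Template entry point: only AEAD algorithms can be wrapped by pcrypt. */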
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
	}

	return -EINVAL;
}

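/*
 * Called when the serial cpumask of the padata instance changes. This
 * is the classic RCU update pattern the comment in struct padata_pcrypt
 * refers to: publish a copy of the new callback mask with
 * rcu_assign_pointer(), wait for readers in pcrypt_do_parallel() with
 * synchronize_rcu(), then free the old mask.
 */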
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, "%s", name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}

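/*
 * Bring up one padata instance: allocate its workqueue and padata
 * context, seed the callback cpumask with the currently online CPUs,
 * register the cpumask-change notifier, and add the instance to the
 * pcrypt kset (visible under /sys/kernel/pcrypt/<name>). CPU hotplug is
 * held off with get_online_cpus() while the initial mask is built.
 */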
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}

static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.module = THIS_MODULE,
};

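/*
 * Module init: create the sysfs kset, set up the "pencrypt" and
 * "pdecrypt" padata instances, start them, and finally register the
 * "pcrypt" template with the crypto API.
 */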
static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	pcrypt_fini_padata(&pencrypt);
	pcrypt_fini_padata(&pdecrypt);

	kset_unregister(pcrypt_kset);
	crypto_unregister_template(&pcrypt_tmpl);
}

subsys_initcall(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");