// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linear symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers.
 *
 * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
	struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_lskcipher, base);
}

static inline struct lskcipher_alg *__crypto_lskcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct lskcipher_alg, co.base);
}

static inline struct crypto_istat_cipher *lskcipher_get_stat(
	struct lskcipher_alg *alg)
{
	return skcipher_get_stat_common(&alg->co);
}

static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err)
		atomic64_inc(&istat->err_cnt);

	return err;
}

static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);

	if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		return lskcipher_setkey_unaligned(tfm, key, keylen);
	else
		return cipher->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);

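/*
 * Fallback for misaligned buffers: the data is bounced through a
 * page-sized scratch buffer one chunk at a time, and the IV (together
 * with any trailing cipher state) likewise goes through an aligned
 * copy before and after the operation.
 */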
static int crypto_lskcipher_crypt_unaligned(
	struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
	u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv, u32 flags))
{
	unsigned statesize = crypto_lskcipher_statesize(tfm);
	unsigned ivsize = crypto_lskcipher_ivsize(tfm);
	unsigned bs = crypto_lskcipher_blocksize(tfm);
	unsigned cs = crypto_lskcipher_chunksize(tfm);
	int err;
	u8 *tiv;
	u8 *p;

	BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
		     MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);

	tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!tiv)
		return -ENOMEM;

	memcpy(tiv, iv, ivsize + statesize);

	p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	err = -ENOMEM;
	if (!p)
		goto out;

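	/*
	 * Process at most one page per iteration, rounding the chunk
	 * down to a multiple of the chunk size whenever more than one
	 * chunk's worth of data remains.
	 */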
	while (len >= bs) {
		unsigned chunk = min((unsigned)PAGE_SIZE, len);

		if (chunk > cs)
			chunk &= ~(cs - 1);

		memcpy(p, src, chunk);
		err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL);
		if (err)
			goto out;

		memcpy(dst, p, chunk);
		src += chunk;
		dst += chunk;
		len -= chunk;
	}

	err = len ? -EINVAL : 0;

out:
	memcpy(iv, tiv, ivsize + statesize);
	kfree_sensitive(p);
	kfree_sensitive(tiv);
	return err;
}

static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
				  u8 *dst, unsigned len, u8 *iv,
				  int (*crypt)(struct crypto_lskcipher *tfm,
					       const u8 *src, u8 *dst,
					       unsigned len, u8 *iv,
					       u32 flags))
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
	int ret;

	if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
	    alignmask) {
		ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
						       crypt);
		goto out;
	}

	ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);

out:
	return crypto_lskcipher_errstat(alg, ret);
}

int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(len, &istat->encrypt_tlen);
	}

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);

int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(len, &istat->decrypt_tlen);
	}

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);

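/*
 * Bridge an skcipher_request onto the linear lskcipher API: walk the
 * scatterlists and feed each contiguous span to the underlying cipher.
 * The IV and chunk state live in the (suitably aligned) request context
 * so that a request may be continued across multiple calls; the
 * CONT/FINAL flags tell the cipher whether more data follows.
 */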
static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
				     int (*crypt)(struct crypto_lskcipher *tfm,
						  const u8 *src, u8 *dst,
						  unsigned len, u8 *ivs,
						  u32 flags))
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	u8 *ivs = skcipher_request_ctx(req);
	struct crypto_lskcipher *tfm = *ctx;
	struct skcipher_walk walk;
	unsigned ivsize;
	u32 flags;
	int err;

	ivsize = crypto_lskcipher_ivsize(tfm);
	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);

	flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT)
		flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
	else
		memcpy(ivs, req->iv, ivsize);

	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
		flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;

	err = skcipher_walk_virt(&walk, req, false);

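	/* Only the last chunk of the walk may carry FLAG_FINAL. */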
	while (walk.nbytes) {
		err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
			    walk.nbytes, ivs,
			    flags & ~(walk.nbytes == walk.total ?
			    0 : CRYPTO_LSKCIPHER_FLAG_FINAL));
		err = skcipher_walk_done(&walk, err);
		flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
	}

	if (flags & CRYPTO_LSKCIPHER_FLAG_FINAL)
		memcpy(req->iv, ivs, ivsize);

	return err;
}

int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->encrypt);
}

int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->decrypt);
}

static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_lskcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
{
	struct lskcipher_instance *skcipher =
		container_of(inst, struct lskcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void __maybe_unused crypto_lskcipher_show(
	struct seq_file *m, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);

	seq_printf(m, "type         : lskcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->co.min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->co.max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
	seq_printf(m, "statesize    : %u\n", skcipher->co.statesize);
}

static int __maybe_unused crypto_lskcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->co.min_keysize;
	rblkcipher.max_keysize = skcipher->co.max_keysize;
	rblkcipher.ivsize = skcipher->co.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_lskcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = lskcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_lskcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_lskcipher_init_tfm,
	.free = crypto_lskcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_lskcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_lskcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_lskcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_LSKCIPHER,
	.tfmsize = offsetof(struct crypto_lskcipher, base),
};

static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}

int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_lskcipher *skcipher;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
	if (IS_ERR(skcipher)) {
		crypto_mod_put(calg);
		return PTR_ERR(skcipher);
	}

	*ctx = skcipher;
	tfm->exit = crypto_lskcipher_exit_tfm_sg;

	return 0;
}

int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
			  struct crypto_instance *inst,
			  const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_lskcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);

struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
						u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);
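
/*
 * Minimal usage sketch (illustrative only; "cbc(aes)" is assumed to be
 * provided by an lskcipher implementation, and key/keylen/src/dst/len
 * are hypothetical caller-owned buffers):
 *
 *	struct crypto_lskcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_lskcipher_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_lskcipher_encrypt(tfm, src, dst, len, iv);
 *
 *	crypto_free_lskcipher(tfm);
 *
 * The iv buffer must have room for the IV plus any trailing cipher
 * state (crypto_lskcipher_ivsize() + crypto_lskcipher_statesize()) and
 * is updated in place, so consecutive calls continue the stream.
 */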

static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->co.chunksize & (alg->co.chunksize - 1))
		return -EINVAL;

	base->cra_type = &crypto_lskcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;

	return 0;
}

int crypto_register_lskcipher(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = lskcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_lskcipher);

void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
{
	crypto_unregister_alg(&alg->co.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);

int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_lskcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_lskciphers);
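
/*
 * Registration sketch for a driver carrying a table of algorithms (the
 * "xyz" names and callbacks below are hypothetical, not part of this
 * file):
 *
 *	static struct lskcipher_alg xyz_algs[] = { {
 *		.co.base.cra_name	 = "ecb(xyz)",
 *		.co.base.cra_driver_name = "ecb-xyz-lib",
 *		.co.base.cra_priority	 = 100,
 *		.co.base.cra_blocksize	 = 16,
 *		.co.min_keysize		 = 16,
 *		.co.max_keysize		 = 32,
 *		.setkey			 = xyz_setkey,
 *		.encrypt		 = xyz_encrypt,
 *		.decrypt		 = xyz_decrypt,
 *	} };
 *
 * module_init() would then call crypto_register_lskciphers(xyz_algs,
 * ARRAY_SIZE(xyz_algs)), and module_exit() the matching
 * crypto_unregister_lskciphers(xyz_algs, ARRAY_SIZE(xyz_algs)).
 */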

void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);

int lskcipher_register_instance(struct crypto_template *tmpl,
				struct lskcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = lskcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(lskcipher_register_instance);

static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
				   unsigned int keylen)
{
	struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);

	crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
				   CRYPTO_TFM_REQ_MASK);
	return crypto_lskcipher_setkey(cipher, key, keylen);
}

static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
	struct crypto_lskcipher_spawn *spawn;
	struct crypto_lskcipher *cipher;

	spawn = lskcipher_instance_ctx(inst);
	cipher = crypto_spawn_lskcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	*ctx = cipher;
	return 0;
}

static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}

static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
{
	crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
 *
 * Allocate an lskcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to
 * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
 * ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct lskcipher_instance *lskcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct lskcipher_instance *inst;
	struct crypto_lskcipher_spawn *spawn;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	struct lskcipher_alg *cipher_alg;
	const char *cipher_name;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = lskcipher_instance_ctx(inst);
	err = crypto_grab_lskcipher(spawn,
				    lskcipher_crypto_instance(inst),
				    cipher_name, 0, mask);

	ecb_name[0] = 0;
	if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_lskcipher(spawn,
					    lskcipher_crypto_instance(inst),
					    ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	cipher_alg = crypto_lskcipher_spawn_alg(spawn);

	err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
				  &cipher_alg->co.base);
	if (err)
		goto err_free_inst;

	if (ecb_name[0]) {
		int len;

		err = -EINVAL;
		len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
			      sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, ecb_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (strcmp(ecb_name, cipher_name) &&
		    snprintf(inst->alg.co.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, cipher_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	} else {
		/* Don't allow nesting. */
		err = -ELOOP;
		if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
			goto err_free_inst;
	}

	err = -EINVAL;
	if (cipher_alg->co.ivsize)
		goto err_free_inst;

	inst->free = lskcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
	inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
	inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
	inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
	inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.statesize = cipher_alg->co.statesize;

	/* Use struct crypto_lskcipher * by default, can be overridden */
	inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
	inst->alg.setkey = lskcipher_setkey_simple;
	inst->alg.init = lskcipher_init_tfm_simple;
	inst->alg.exit = lskcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	lskcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);
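
/*
 * A typical template ->create() hook built on the helper above; a sketch
 * modelled on the simple mode templates (example_create and the
 * encrypt/decrypt assignments are illustrative names only):
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct lskcipher_instance *inst;
 *		int err;
 *
 *		inst = lskcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = lskcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */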
660