xref: /linux/crypto/skcipher.c (revision 4f95a6d2748acffaf866cc58e51d2fd00227e91b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Symmetric key cipher operations.
4  *
5  * Generic encrypt/decrypt wrapper for ciphers, handles operations across
6  * multiple page boundaries by using temporary blocks.  In user context,
7  * the kernel is given a chance to schedule us once per page.
8  *
9  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
10  */
11 
12 #include <crypto/internal/aead.h>
13 #include <crypto/internal/cipher.h>
14 #include <crypto/internal/skcipher.h>
15 #include <crypto/scatterwalk.h>
16 #include <linux/bug.h>
17 #include <linux/cryptouser.h>
18 #include <linux/err.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/module.h>
22 #include <linux/seq_file.h>
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/string_choices.h>
26 #include <net/netlink.h>
27 #include "skcipher.h"
28 
/* cra_flags type mask used when matching skcipher-compatible algorithms. */
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

/* Per-step state flags kept in the skcipher_walk while it is in progress. */
enum {
	SKCIPHER_WALK_SLOW = 1 << 0,	/* processing via aligned bounce buffer */
	SKCIPHER_WALK_COPY = 1 << 1,	/* staging the chunk through walk->page */
	SKCIPHER_WALK_DIFF = 1 << 2,	/* src and dst mapped separately */
	SKCIPHER_WALK_SLEEP = 1 << 3,	/* allocations may use GFP_KERNEL */
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);
41 
/* Map the current source scatterlist entry and cache the virtual address. */
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

/* Map the current destination scatterlist entry and cache the address. */
static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

/* Undo skcipher_map_src(). */
static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

/* Undo skcipher_map_dst(). */
static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}
61 
62 static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
63 {
64 	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
65 }
66 
/* Convert a generic crypto_alg to its containing skcipher_alg. */
static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}
72 
/*
 * Slow-path completion: flush the aligned bounce buffer, which the algorithm
 * transformed in place, back out to the destination scatterlist.
 */
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);

	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return 0;
}
80 
/**
 * skcipher_walk_done() - finish one step of a skcipher_walk
 * @walk: the skcipher_walk
 * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
 *	 or a -errno value to terminate the walk due to an error
 *
 * This function cleans up after one step of walking through the source and
 * destination scatterlists, and advances to the next step if applicable.
 * walk->nbytes is set to the number of bytes available in the next step,
 * walk->total is set to the new total number of bytes remaining, and
 * walk->{src,dst}.virt.addr is set to the next pair of data pointers.  If there
 * is no more data, or if an error occurred (i.e. -errno return), then
 * walk->nbytes and walk->total are set to 0 and all resources owned by the
 * skcipher_walk are freed.
 *
 * Return: 0 or a -errno value.  If @res was a -errno value then it will be
 *	   returned, but other errors may occur too.
 */
int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
	unsigned int n = walk->nbytes; /* num bytes processed this step */
	unsigned int total = 0; /* new total remaining */

	if (!n)
		goto finish;

	if (likely(res >= 0)) {
		n -= res; /* subtract num bytes *not* processed */
		total = walk->total - n;
	}

	/* Tear down this step's mappings according to which path was taken. */
	if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		/* Copy the staged result from walk->page to the real dst. */
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else { /* SKCIPHER_WALK_SLOW */
		if (res > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			res = -EINVAL;
			total = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (res > 0)
		res = 0;

	walk->total = total;
	walk->nbytes = 0;

	/* Advance both scatterlist walkers past the bytes just processed. */
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, total);
	scatterwalk_done(&walk->out, 1, total);

	if (total) {
		if (walk->flags & SKCIPHER_WALK_SLEEP)
			cond_resched();
		walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
				 SKCIPHER_WALK_DIFF);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	/* A bounced IV must be copied back to the caller's buffer. */
	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
173 
/*
 * Slow path: process the next chunk through an aligned bounce buffer.  Used
 * when fewer than bsize contiguous bytes are available in the scatterlists,
 * or when the copy-path page allocation failed.
 */
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	unsigned alignmask = walk->alignmask;
	unsigned n;
	u8 *buffer;

	/* Reuse walk->page as the bounce buffer if it was already allocated. */
	if (!walk->buffer)
		walk->buffer = walk->page;
	buffer = walk->buffer;
	if (!buffer) {
		/* Min size for a buffer of bsize bytes aligned to alignmask */
		n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

		buffer = kzalloc(n, skcipher_walk_gfp(walk));
		if (!buffer)
			return skcipher_walk_done(walk, -ENOMEM);
		walk->buffer = buffer;
	}
	/* src == dst: the algorithm transforms the bounce buffer in place. */
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}
202 
/*
 * Copy path: stage the chunk in walk->page so the transform sees aligned
 * data; skcipher_walk_done() copies the result back to the destination.
 */
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	/* Transform happens in place inside walk->page. */
	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;
	return 0;
}

/*
 * Fast path: map the scatterlist pages directly.  The destination gets its
 * own mapping only when it is not the same memory as the source.
 */
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	/* diff is nonzero iff src and dst refer to different memory. */
	diff = offset_in_page(walk->in.offset) -
	       offset_in_page(walk->out.offset);
	diff |= (u8 *)scatterwalk_page(&walk->in) -
		(u8 *)scatterwalk_page(&walk->out);

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}
235 
/*
 * Set up the next step of the walk, choosing the fast, copy, or slow path
 * based on how many contiguous bytes are available and their alignment.
 */
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		/* Less than one block remaining in total: invalid length. */
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		return skcipher_next_slow(walk, bsize);
	}
	walk->nbytes = n;

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		/* Misaligned data: bounce it through a whole page. */
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}
		walk->flags |= SKCIPHER_WALK_COPY;
		return skcipher_next_copy(walk);
	}

	return skcipher_next_fast(walk);
}
269 
/*
 * Copy a misaligned IV into a freshly allocated buffer that also leaves an
 * aligned stride-sized scratch area in front of it; walk->iv is redirected
 * to the aligned copy (the original is restored in skcipher_walk_done()).
 */
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
	unsigned size;
	u8 *iv;

	/* Min size for a buffer of stride + ivsize, aligned to alignmask */
	size = aligned_stride + ivsize +
	       (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
291 
/*
 * Begin the walk: reject hardirq context, align the IV if necessary, and
 * take the first step.
 */
static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	/* Bounce a misaligned IV through an aligned heap copy. */
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}
308 
/*
 * Initialize a walk over an skcipher request's source and destination
 * scatterlists.  @atomic forces non-sleeping allocations even when the
 * request itself would allow sleeping.
 */
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	const struct skcipher_alg *alg =
		crypto_skcipher_alg(crypto_skcipher_reqtfm(req));

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	/*
	 * Accessing 'alg' directly generates better code than using the
	 * crypto_skcipher_blocksize() and similar helper functions here, as it
	 * prevents the algorithm pointer from being repeatedly reloaded.
	 */
	walk->blocksize = alg->base.cra_blocksize;
	walk->ivsize = alg->co.ivsize;
	walk->alignmask = alg->base.cra_alignmask;

	/* lskcipher-backed algorithms advertise chunksize, not walksize. */
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
349 
/*
 * Shared setup for walking an AEAD request's data: like skcipher_walk_virt()
 * but skips the associated data at the front of both scatterlists.  The
 * caller must have set walk->total before calling.
 */
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	/* Skip over the associated data without copying it anywhere. */
	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	/*
	 * Accessing 'alg' directly generates better code than using the
	 * crypto_aead_blocksize() and similar helper functions here, as it
	 * prevents the algorithm pointer from being repeatedly reloaded.
	 */
	walk->blocksize = alg->base.cra_blocksize;
	walk->stride = alg->chunksize;
	walk->ivsize = alg->ivsize;
	walk->alignmask = alg->base.cra_alignmask;

	return skcipher_walk_first(walk);
}
387 
/* Walk an AEAD encryption request: all of cryptlen is plaintext. */
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

/*
 * Walk an AEAD decryption request: the trailing authentication tag is not
 * part of the data to be processed, so subtract it from the total.
 */
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

/* Mark the tfm as needing a key, unless the algorithm takes no key at all. */
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
413 
/*
 * Set a key whose buffer does not satisfy the algorithm's alignment mask:
 * copy it into an aligned temporary, call ->setkey(), then securely erase
 * and free the copy.
 */
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	/* kfree_sensitive() zeroizes the key copy before freeing it. */
	kfree_sensitive(buffer);
	return ret;
}
434 
/*
 * Set the key for an skcipher tfm.  Delegates to the underlying lskcipher
 * when the algorithm is lskcipher-backed; otherwise validates the key length
 * and dispatches to ->setkey(), bouncing misaligned keys through an aligned
 * copy.  Clears or sets CRYPTO_TFM_NEED_KEY according to the outcome.
 */
int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		/* lskcipher-backed: forward flags and key to the inner tfm. */
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		/* Failed setkey leaves the tfm unusable until a key is set. */
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
471 
/*
 * Encrypt a request.  Fails with -ENOKEY until a key has been set; routes
 * lskcipher-backed algorithms through the scatterlist glue.
 */
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

/* Decrypt a request; mirror image of crypto_skcipher_encrypt(). */
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
497 
/*
 * Export the internal state of an lskcipher-backed request.  The request
 * context holds an aligned IV followed by the algorithm state; only the
 * state portion is copied out.
 */
static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

/* Import previously exported state back into the request context. */
static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

/* No-op export used by algorithms that declare no extra state. */
static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

/* No-op import counterpart to skcipher_noexport(). */
static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}
533 
/* Export request state, routing lskcipher-backed algorithms separately. */
int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

/* Import request state; mirror image of crypto_skcipher_export(). */
int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);
555 
/* Tfm destructor: invoke the algorithm's ->exit() hook. */
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

/*
 * Tfm constructor: flag the need for a key, then either set up the
 * lskcipher glue (sizing the request context for an aligned IV + state)
 * or install the exit hook and run the algorithm's ->init().
 */
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		/* Room to align, plus the IV and the algorithm state. */
		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

/*
 * Context size for a tfm: lskcipher-backed tfms store only a pointer to
 * the inner lskcipher tfm; regular skciphers use the generic extsize.
 */
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

/* Free a template instance via its registered ->free() callback. */
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}
607 
/* /proc/crypto display hook for skcipher algorithms. */
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

/* crypto_user netlink report hook: emit algorithm parameters to userspace. */
static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
645 
/* Frontend type descriptor tying skcipher tfms into the crypto core. */
static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};
661 
/* Grab a reference to an skcipher algorithm for use inside a template. */
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

/* Allocate an skcipher tfm by algorithm name. */
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

/*
 * Allocate a synchronous skcipher tfm whose requests are small enough to
 * live on the stack (SYNC_SKCIPHER_REQUEST_ON_STACK).
 */
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

/* Return whether an skcipher algorithm with the given name is available. */
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);
707 
/*
 * Validate and finish the fields shared by skcipher and lskcipher
 * algorithms before registration.
 */
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	/* Sanity-bound the sizes so walk buffers always fit in a page. */
	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}
724 
/*
 * Validate and finish an skcipher_alg before registration: apply the common
 * checks, default walksize, install no-op import/export for stateless
 * algorithms, and stamp the type fields.
 */
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		/* Stateful algorithms must provide both hooks. */
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}
751 
752 int crypto_register_skcipher(struct skcipher_alg *alg)
753 {
754 	struct crypto_alg *base = &alg->base;
755 	int err;
756 
757 	err = skcipher_prepare_alg(alg);
758 	if (err)
759 		return err;
760 
761 	return crypto_register_alg(base);
762 }
763 EXPORT_SYMBOL_GPL(crypto_register_skcipher);
764 
765 void crypto_unregister_skcipher(struct skcipher_alg *alg)
766 {
767 	crypto_unregister_alg(&alg->base);
768 }
769 EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
770 
771 int crypto_register_skciphers(struct skcipher_alg *algs, int count)
772 {
773 	int i, ret;
774 
775 	for (i = 0; i < count; i++) {
776 		ret = crypto_register_skcipher(&algs[i]);
777 		if (ret)
778 			goto err;
779 	}
780 
781 	return 0;
782 
783 err:
784 	for (--i; i >= 0; --i)
785 		crypto_unregister_skcipher(&algs[i]);
786 
787 	return ret;
788 }
789 EXPORT_SYMBOL_GPL(crypto_register_skciphers);
790 
791 void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
792 {
793 	int i;
794 
795 	for (i = count - 1; i >= 0; --i)
796 		crypto_unregister_skcipher(&algs[i]);
797 }
798 EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
799 
800 int skcipher_register_instance(struct crypto_template *tmpl,
801 			   struct skcipher_instance *inst)
802 {
803 	int err;
804 
805 	if (WARN_ON(!inst->free))
806 		return -EINVAL;
807 
808 	err = skcipher_prepare_alg(&inst->alg);
809 	if (err)
810 		return err;
811 
812 	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
813 }
814 EXPORT_SYMBOL_GPL(skcipher_register_instance);
815 
/*
 * Default ->setkey() for simple mode-of-operation instances: forward the
 * request flags and key to the underlying single-block cipher.
 */
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

/* Default ->init(): instantiate the spawned cipher into the tfm context. */
static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

/* Default ->exit(): free the cipher created by skcipher_init_tfm_simple(). */
static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

/* Default ->free(): drop the cipher spawn and release the instance. */
static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
854 
/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	/* The spawn is allocated in the same block, right after the instance. */
	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
923 
924 MODULE_LICENSE("GPL");
925 MODULE_DESCRIPTION("Symmetric key cipher type");
926 MODULE_IMPORT_NS("CRYPTO_INTERNAL");
927