xref: /linux/crypto/skcipher.c (revision 7b6092ee7a4ce2d03dc65b87537889e8e1e0ab95)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_SLOW = 1 << 0,	/* block bounced through a temporary buffer */
	SKCIPHER_WALK_COPY = 1 << 1,	/* data copied via walk->page for alignment */
	SKCIPHER_WALK_DIFF = 1 << 2,	/* src and dst have separate mappings */
	SKCIPHER_WALK_SLEEP = 1 << 3,	/* sleeping (GFP_KERNEL) is allowed */
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
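
/*
 * Worked example for skcipher_get_spot() (illustrative, assuming 4 KiB
 * pages): with start == 0xffc and len == 8, the last byte would land at
 * 0x1003, so end_page == 0x1000 and the spot is moved up to 0x1000.  With
 * start == 0x1000 instead, end_page == 0x1000 == start and the buffer is
 * returned unmoved, since [0x1000, 0x1007] already sits in one page.
 */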

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);

	/*
	 * scatterwalk_copychunks() already advanced walk->out, and walk->in
	 * was advanced when the bounce buffer was filled in
	 * skcipher_next_slow(), so report zero bytes for the caller to
	 * advance by.
	 */
	return 0;
}

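/*
 * skcipher_walk_done() - finish one step of a scatterlist walk
 * @walk: the walk being completed
 * @err: 0 if the whole step was processed, a negative errno on failure, or
 *	 the (positive) number of bytes at the end of this step that the
 *	 caller could not process
 *
 * Unmaps or flushes any temporary buffers used for the step, advances the
 * scatterlist cursors, and either starts the next step or, at the end of
 * the request, copies the IV back and frees the walk's buffers.
 */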
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	unsigned alignmask = walk->alignmask;
	unsigned a;
	unsigned n;
	u8 *buffer;

	if (!walk->buffer)
		walk->buffer = walk->page;
	buffer = walk->buffer;
	if (buffer)
		goto ok;

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	/* Minimum size to align buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	buffer = kzalloc(n, skcipher_walk_gfp(walk));
	if (!buffer)
		return skcipher_walk_done(walk, -ENOMEM);
	walk->buffer = buffer;
ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;
	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	/*
	 * diff is zero only when source and destination start at the same
	 * offset within the same page, i.e. the operation is in-place and
	 * a single mapping can serve both sides.
	 */
	diff = offset_in_page(walk->in.offset) -
	       offset_in_page(walk->out.offset);
	diff |= (u8 *)scatterwalk_page(&walk->in) -
		(u8 *)scatterwalk_page(&walk->out);

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		return skcipher_next_slow(walk, bsize);
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		return skcipher_next_copy(walk);
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	size += aligned_bs + ivsize;

	/* Minimum size to ensure buffer does not straddle a page. */
	size += (bs - 1) & ~(alignmask | a);

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
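
/*
 * A minimal sketch (not part of this file) of how an skcipher
 * implementation typically drives the walk; the round_down() step keeps
 * intermediate steps a multiple of the algorithm's stride:
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes;
 *
 *		if (n < walk.total)
 *			n = round_down(n, walk.stride);
 *
 *		... transform n bytes from walk.src.virt.addr into
 *		    walk.dst.virt.addr ...
 *
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */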

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
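
/*
 * Note that both AEAD walks cover only the plaintext/ciphertext region:
 * skcipher_walk_aead_common() skips req->assoclen bytes of associated
 * data in both scatterlists before the walk starts, and the decrypt
 * variant additionally excludes the authentication tag by subtracting
 * crypto_aead_authsize() from the total.  The step loop itself is the
 * same as for skcipher_walk_virt().
 */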

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
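
/*
 * A sketch of the caller side of these entry points (illustrative only:
 * error handling is trimmed, and "cbc(aes)", buf, key, iv and their
 * sizes are placeholders):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	sg_init_one(&sg, buf, len);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */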

static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
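
/*
 * Because the request size of a sync tfm is bounded by
 * MAX_SYNC_SKCIPHER_REQSIZE, callers may keep the request on the stack.
 * A sketch (placeholder names, error handling trimmed):
 *
 *	struct crypto_sync_skcipher *tfm;
 *
 *	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
 *	...
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *	skcipher_request_set_sync_tfm(req, tfm);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 */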

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
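
/*
 * A sketch of a template ->create() callback built on the helper above
 * (modelled on crypto/ecb.c; example_create(), example_encrypt() and
 * example_decrypt() are placeholders):
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */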

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");