1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
4  *
5  * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
6  */
7 
8 #include <crypto/aes.h>
9 #include <crypto/ctr.h>
10 #include <crypto/internal/hash.h>
11 #include <crypto/internal/skcipher.h>
12 #include <crypto/scatterwalk.h>
13 #include <crypto/sha2.h>
14 #include <crypto/utils.h>
15 #include <crypto/xts.h>
16 #include <linux/cpufeature.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/string.h>
20 
21 #include <asm/hwcap.h>
22 #include <asm/simd.h>
23 
24 #include "aes-ce-setkey.h"
25 
26 #ifdef USE_V8_CRYPTO_EXTENSIONS
27 #define MODE			"ce"
28 #define PRIO			300
29 #define aes_expandkey		ce_aes_expandkey
30 #define aes_ecb_encrypt		ce_aes_ecb_encrypt
31 #define aes_ecb_decrypt		ce_aes_ecb_decrypt
32 #define aes_cbc_encrypt		ce_aes_cbc_encrypt
33 #define aes_cbc_decrypt		ce_aes_cbc_decrypt
34 #define aes_cbc_cts_encrypt	ce_aes_cbc_cts_encrypt
35 #define aes_cbc_cts_decrypt	ce_aes_cbc_cts_decrypt
36 #define aes_essiv_cbc_encrypt	ce_aes_essiv_cbc_encrypt
37 #define aes_essiv_cbc_decrypt	ce_aes_essiv_cbc_decrypt
38 #define aes_ctr_encrypt		ce_aes_ctr_encrypt
39 #define aes_xctr_encrypt	ce_aes_xctr_encrypt
40 #define aes_xts_encrypt		ce_aes_xts_encrypt
41 #define aes_xts_decrypt		ce_aes_xts_decrypt
42 #define aes_mac_update		ce_aes_mac_update
43 MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 Crypto Extensions");
44 #else
45 #define MODE			"neon"
46 #define PRIO			200
47 #define aes_ecb_encrypt		neon_aes_ecb_encrypt
48 #define aes_ecb_decrypt		neon_aes_ecb_decrypt
49 #define aes_cbc_encrypt		neon_aes_cbc_encrypt
50 #define aes_cbc_decrypt		neon_aes_cbc_decrypt
51 #define aes_cbc_cts_encrypt	neon_aes_cbc_cts_encrypt
52 #define aes_cbc_cts_decrypt	neon_aes_cbc_cts_decrypt
53 #define aes_essiv_cbc_encrypt	neon_aes_essiv_cbc_encrypt
54 #define aes_essiv_cbc_decrypt	neon_aes_essiv_cbc_decrypt
55 #define aes_ctr_encrypt		neon_aes_ctr_encrypt
56 #define aes_xctr_encrypt	neon_aes_xctr_encrypt
57 #define aes_xts_encrypt		neon_aes_xts_encrypt
58 #define aes_xts_decrypt		neon_aes_xts_decrypt
59 #define aes_mac_update		neon_aes_mac_update
60 MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 NEON");
61 #endif
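/*
 * MODE is appended to the cra_driver_name of every algorithm below and PRIO
 * becomes its cra_priority.  When both builds of this file are present, the
 * crypto API picks the registration with the highest priority, so the Crypto
 * Extensions variant (300) is preferred over the NEON variant (200), and both
 * win over the generic C implementation.
 */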
62 #if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
63 MODULE_ALIAS_CRYPTO("ecb(aes)");
64 MODULE_ALIAS_CRYPTO("cbc(aes)");
65 MODULE_ALIAS_CRYPTO("ctr(aes)");
66 MODULE_ALIAS_CRYPTO("xts(aes)");
67 MODULE_ALIAS_CRYPTO("xctr(aes)");
68 #endif
69 MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
70 MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)");
71 MODULE_ALIAS_CRYPTO("cmac(aes)");
72 MODULE_ALIAS_CRYPTO("xcbc(aes)");
73 MODULE_ALIAS_CRYPTO("cbcmac(aes)");
74 
75 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
76 MODULE_LICENSE("GPL v2");
77 
78 /* defined in aes-modes.S */
79 asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
80 				int rounds, int blocks);
81 asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
82 				int rounds, int blocks);
83 
84 asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
85 				int rounds, int blocks, u8 iv[]);
86 asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
87 				int rounds, int blocks, u8 iv[]);
88 
89 asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
90 				int rounds, int bytes, u8 const iv[]);
91 asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
92 				int rounds, int bytes, u8 const iv[]);
93 
94 asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
95 				int rounds, int bytes, u8 ctr[]);
96 
97 asmlinkage void aes_xctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
98 				 int rounds, int bytes, u8 ctr[], int byte_ctr);
99 
100 asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
101 				int rounds, int bytes, u32 const rk2[], u8 iv[],
102 				int first);
103 asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
104 				int rounds, int bytes, u32 const rk2[], u8 iv[],
105 				int first);
106 
107 asmlinkage void aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
108 				      int rounds, int blocks, u8 iv[],
109 				      u32 const rk2[]);
110 asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
111 				      int rounds, int blocks, u8 iv[],
112 				      u32 const rk2[]);
113 
114 asmlinkage int aes_mac_update(u8 const in[], u32 const rk[], int rounds,
115 			      int blocks, u8 dg[], int enc_before,
116 			      int enc_after);
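/*
 * The asm routines above take expanded round keys plus a round count rather
 * than the raw key.  Callers derive that count from the key length as
 * 6 + key_length / 4, i.e. 10, 12 or 14 rounds for AES-128, AES-192 and
 * AES-256 respectively.
 */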
117 
118 struct crypto_aes_xts_ctx {
119 	struct crypto_aes_ctx key1;
120 	struct crypto_aes_ctx __aligned(8) key2;
121 };
122 
123 struct crypto_aes_essiv_cbc_ctx {
124 	struct crypto_aes_ctx key1;
125 	struct crypto_aes_ctx __aligned(8) key2;
126 };
127 
128 struct mac_tfm_ctx {
129 	struct crypto_aes_ctx key;
130 	u8 __aligned(8) consts[];
131 };
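/*
 * consts[] is a flexible array whose storage comes from cra_ctxsize: two AES
 * blocks for cmac(aes) and xcbc(aes) to hold the derived subkeys, and nothing
 * at all for cbcmac(aes), which needs no constants.
 */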
132 
133 struct mac_desc_ctx {
134 	u8 dg[AES_BLOCK_SIZE];
135 };
136 
137 static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
138 			       unsigned int key_len)
139 {
140 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
141 
142 	return aes_expandkey(ctx, in_key, key_len);
143 }
144 
145 static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
146 				      const u8 *in_key, unsigned int key_len)
147 {
148 	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
149 	int ret;
150 
151 	ret = xts_verify_key(tfm, in_key, key_len);
152 	if (ret)
153 		return ret;
154 
155 	ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
156 	if (!ret)
157 		ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
158 				    key_len / 2);
159 	return ret;
160 }
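/*
 * XTS keys are twice the AES key size: the first half is expanded into key1
 * for encrypting the data, the second half into key2 for generating the
 * tweak.  xts_verify_key() rejects malformed keys (e.g. an odd combined
 * length) before either half is expanded.
 */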
161 
162 static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
163 					    const u8 *in_key,
164 					    unsigned int key_len)
165 {
166 	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
167 	u8 digest[SHA256_DIGEST_SIZE];
168 	int ret;
169 
170 	ret = aes_expandkey(&ctx->key1, in_key, key_len);
171 	if (ret)
172 		return ret;
173 
174 	sha256(in_key, key_len, digest);
175 
176 	return aes_expandkey(&ctx->key2, digest, sizeof(digest));
177 }
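/*
 * ESSIV derives the IV-generation key from the data key itself: key2 is the
 * expansion of SHA-256(key1), so the second key is always 256 bits regardless
 * of the AES key size chosen by the caller.
 */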
178 
179 static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
180 {
181 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
182 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
183 	int err, rounds = 6 + ctx->key_length / 4;
184 	struct skcipher_walk walk;
185 	unsigned int blocks;
186 
187 	err = skcipher_walk_virt(&walk, req, false);
188 
189 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
190 		scoped_ksimd()
191 			aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
192 					ctx->key_enc, rounds, blocks);
193 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
194 	}
195 	return err;
196 }
197 
198 static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
199 {
200 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
201 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
202 	int err, rounds = 6 + ctx->key_length / 4;
203 	struct skcipher_walk walk;
204 	unsigned int blocks;
205 
206 	err = skcipher_walk_virt(&walk, req, false);
207 
208 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
209 		scoped_ksimd()
210 			aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
211 					ctx->key_dec, rounds, blocks);
212 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
213 	}
214 	return err;
215 }
216 
217 static int cbc_encrypt_walk(struct skcipher_request *req,
218 			    struct skcipher_walk *walk)
219 {
220 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
221 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
222 	int err = 0, rounds = 6 + ctx->key_length / 4;
223 	unsigned int blocks;
224 
225 	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
226 		scoped_ksimd()
227 			aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
228 					ctx->key_enc, rounds, blocks, walk->iv);
229 		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
230 	}
231 	return err;
232 }
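/*
 * The CBC walk loops live in separate helpers because they are reused by the
 * cts(cbc(aes)) and essiv(cbc(aes),sha256) implementations further down,
 * which set up their own skcipher_walk before handing over the bulk of the
 * data.
 */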
233 
234 static int __maybe_unused cbc_encrypt(struct skcipher_request *req)
235 {
236 	struct skcipher_walk walk;
237 	int err;
238 
239 	err = skcipher_walk_virt(&walk, req, false);
240 	if (err)
241 		return err;
242 	return cbc_encrypt_walk(req, &walk);
243 }
244 
245 static int cbc_decrypt_walk(struct skcipher_request *req,
246 			    struct skcipher_walk *walk)
247 {
248 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
249 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
250 	int err = 0, rounds = 6 + ctx->key_length / 4;
251 	unsigned int blocks;
252 
253 	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
254 		scoped_ksimd()
255 			aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
256 					ctx->key_dec, rounds, blocks, walk->iv);
257 		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
258 	}
259 	return err;
260 }
261 
262 static int __maybe_unused cbc_decrypt(struct skcipher_request *req)
263 {
264 	struct skcipher_walk walk;
265 	int err;
266 
267 	err = skcipher_walk_virt(&walk, req, false);
268 	if (err)
269 		return err;
270 	return cbc_decrypt_walk(req, &walk);
271 }
272 
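/*
 * CTS-CBC (ciphertext stealing) handles messages that are not a multiple of
 * the block size.  Everything up to the last two blocks is processed as
 * ordinary CBC via a subrequest; the final full block plus the partial tail
 * (or the single remaining block) is then handed to the aes_cbc_cts_* asm
 * routines, which perform the stealing and the swap of the last two
 * ciphertext blocks.
 */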
273 static int cts_cbc_encrypt(struct skcipher_request *req)
274 {
275 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
276 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
277 	int err, rounds = 6 + ctx->key_length / 4;
278 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
279 	struct scatterlist *src = req->src, *dst = req->dst;
280 	struct scatterlist sg_src[2], sg_dst[2];
281 	struct skcipher_request subreq;
282 	struct skcipher_walk walk;
283 
284 	skcipher_request_set_tfm(&subreq, tfm);
285 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
286 				      NULL, NULL);
287 
288 	if (req->cryptlen <= AES_BLOCK_SIZE) {
289 		if (req->cryptlen < AES_BLOCK_SIZE)
290 			return -EINVAL;
291 		cbc_blocks = 1;
292 	}
293 
294 	if (cbc_blocks > 0) {
295 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
296 					   cbc_blocks * AES_BLOCK_SIZE,
297 					   req->iv);
298 
299 		err = skcipher_walk_virt(&walk, &subreq, false) ?:
300 		      cbc_encrypt_walk(&subreq, &walk);
301 		if (err)
302 			return err;
303 
304 		if (req->cryptlen == AES_BLOCK_SIZE)
305 			return 0;
306 
307 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
308 		if (req->dst != req->src)
309 			dst = scatterwalk_ffwd(sg_dst, req->dst,
310 					       subreq.cryptlen);
311 	}
312 
313 	/* handle ciphertext stealing */
314 	skcipher_request_set_crypt(&subreq, src, dst,
315 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
316 				   req->iv);
317 
318 	err = skcipher_walk_virt(&walk, &subreq, false);
319 	if (err)
320 		return err;
321 
322 	scoped_ksimd()
323 		aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
324 				    ctx->key_enc, rounds, walk.nbytes, walk.iv);
325 
326 	return skcipher_walk_done(&walk, 0);
327 }
328 
329 static int cts_cbc_decrypt(struct skcipher_request *req)
330 {
331 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
332 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
333 	int err, rounds = 6 + ctx->key_length / 4;
334 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
335 	struct scatterlist *src = req->src, *dst = req->dst;
336 	struct scatterlist sg_src[2], sg_dst[2];
337 	struct skcipher_request subreq;
338 	struct skcipher_walk walk;
339 
340 	skcipher_request_set_tfm(&subreq, tfm);
341 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
342 				      NULL, NULL);
343 
344 	if (req->cryptlen <= AES_BLOCK_SIZE) {
345 		if (req->cryptlen < AES_BLOCK_SIZE)
346 			return -EINVAL;
347 		cbc_blocks = 1;
348 	}
349 
350 	if (cbc_blocks > 0) {
351 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
352 					   cbc_blocks * AES_BLOCK_SIZE,
353 					   req->iv);
354 
355 		err = skcipher_walk_virt(&walk, &subreq, false) ?:
356 		      cbc_decrypt_walk(&subreq, &walk);
357 		if (err)
358 			return err;
359 
360 		if (req->cryptlen == AES_BLOCK_SIZE)
361 			return 0;
362 
363 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
364 		if (req->dst != req->src)
365 			dst = scatterwalk_ffwd(sg_dst, req->dst,
366 					       subreq.cryptlen);
367 	}
368 
369 	/* handle ciphertext stealing */
370 	skcipher_request_set_crypt(&subreq, src, dst,
371 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
372 				   req->iv);
373 
374 	err = skcipher_walk_virt(&walk, &subreq, false);
375 	if (err)
376 		return err;
377 
378 	scoped_ksimd()
379 		aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
380 				    ctx->key_dec, rounds, walk.nbytes, walk.iv);
381 
382 	return skcipher_walk_done(&walk, 0);
383 }
384 
385 static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
386 {
387 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
388 	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
389 	int err, rounds = 6 + ctx->key1.key_length / 4;
390 	struct skcipher_walk walk;
391 	unsigned int blocks;
392 
393 	err = skcipher_walk_virt(&walk, req, false);
394 
395 	blocks = walk.nbytes / AES_BLOCK_SIZE;
396 	if (blocks) {
397 		scoped_ksimd()
398 			aes_essiv_cbc_encrypt(walk.dst.virt.addr,
399 					      walk.src.virt.addr,
400 					      ctx->key1.key_enc, rounds, blocks,
401 					      req->iv, ctx->key2.key_enc);
402 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
403 	}
404 	return err ?: cbc_encrypt_walk(req, &walk);
405 }
406 
407 static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
408 {
409 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
410 	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
411 	int err, rounds = 6 + ctx->key1.key_length / 4;
412 	struct skcipher_walk walk;
413 	unsigned int blocks;
414 
415 	err = skcipher_walk_virt(&walk, req, false);
416 
417 	blocks = walk.nbytes / AES_BLOCK_SIZE;
418 	if (blocks) {
419 		scoped_ksimd()
420 			aes_essiv_cbc_decrypt(walk.dst.virt.addr,
421 					      walk.src.virt.addr,
422 					      ctx->key1.key_dec, rounds, blocks,
423 					      req->iv, ctx->key2.key_enc);
424 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
425 	}
426 	return err ?: cbc_decrypt_walk(req, &walk);
427 }
428 
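/*
 * XCTR is the counter mode used by HCTR2: instead of incrementing a
 * big-endian counter embedded in the IV, the block index (starting at 1) is
 * combined with the IV by XOR in little-endian form.  byte_ctr carries the
 * running byte offset across walk iterations so the asm code can compute the
 * correct block index for each chunk.
 */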
429 static int __maybe_unused xctr_encrypt(struct skcipher_request *req)
430 {
431 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
432 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
433 	int err, rounds = 6 + ctx->key_length / 4;
434 	struct skcipher_walk walk;
435 	unsigned int byte_ctr = 0;
436 
437 	err = skcipher_walk_virt(&walk, req, false);
438 
439 	while (walk.nbytes > 0) {
440 		const u8 *src = walk.src.virt.addr;
441 		unsigned int nbytes = walk.nbytes;
442 		u8 *dst = walk.dst.virt.addr;
443 		u8 buf[AES_BLOCK_SIZE];
444 
445 		/*
446 		 * If given less than 16 bytes, we must copy the partial block
447 		 * into a temporary buffer of 16 bytes to avoid out of bounds
448 		 * reads and writes.  Furthermore, this code is somewhat unusual
449 		 * in that it expects the end of the data to be at the end of
450 		 * the temporary buffer, rather than the start of the data at
451 		 * the start of the temporary buffer.
452 		 */
453 		if (unlikely(nbytes < AES_BLOCK_SIZE))
454 			src = dst = memcpy(buf + sizeof(buf) - nbytes,
455 					   src, nbytes);
456 		else if (nbytes < walk.total)
457 			nbytes &= ~(AES_BLOCK_SIZE - 1);
458 
459 		scoped_ksimd()
460 			aes_xctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
461 							 walk.iv, byte_ctr);
462 
463 		if (unlikely(nbytes < AES_BLOCK_SIZE))
464 			memcpy(walk.dst.virt.addr,
465 			       buf + sizeof(buf) - nbytes, nbytes);
466 		byte_ctr += nbytes;
467 
468 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
469 	}
470 
471 	return err;
472 }
473 
474 static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
475 {
476 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
477 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
478 	int err, rounds = 6 + ctx->key_length / 4;
479 	struct skcipher_walk walk;
480 
481 	err = skcipher_walk_virt(&walk, req, false);
482 
483 	while (walk.nbytes > 0) {
484 		const u8 *src = walk.src.virt.addr;
485 		unsigned int nbytes = walk.nbytes;
486 		u8 *dst = walk.dst.virt.addr;
487 		u8 buf[AES_BLOCK_SIZE];
488 
489 		/*
490 		 * If given less than 16 bytes, we must copy the partial block
491 		 * into a temporary buffer of 16 bytes to avoid out of bounds
492 		 * reads and writes.  Furthermore, this code is somewhat unusual
493 		 * in that it expects the end of the data to be at the end of
494 		 * the temporary buffer, rather than the start of the data at
495 		 * the start of the temporary buffer.
496 		 */
497 		if (unlikely(nbytes < AES_BLOCK_SIZE))
498 			src = dst = memcpy(buf + sizeof(buf) - nbytes,
499 					   src, nbytes);
500 		else if (nbytes < walk.total)
501 			nbytes &= ~(AES_BLOCK_SIZE - 1);
502 
503 		scoped_ksimd()
504 			aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
505 					walk.iv);
506 
507 		if (unlikely(nbytes < AES_BLOCK_SIZE))
508 			memcpy(walk.dst.virt.addr,
509 			       buf + sizeof(buf) - nbytes, nbytes);
510 
511 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
512 	}
513 
514 	return err;
515 }
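/*
 * CTR and XCTR are involutions: decryption just regenerates the same
 * keystream and XORs it with the input, which is why the algorithm
 * definitions below wire both .encrypt and .decrypt to the *_encrypt
 * handlers.
 */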
516 
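/*
 * For XTS, message sizes that are not a multiple of AES_BLOCK_SIZE are
 * handled with ciphertext stealing: the bulk pass covers all but the last
 * full block, and that block together with the partial tail is then
 * processed in a second pass over AES_BLOCK_SIZE + tail bytes.  Inputs
 * shorter than one block are rejected, as required for XTS.
 */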
517 static int __maybe_unused xts_encrypt(struct skcipher_request *req)
518 {
519 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
520 	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
521 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
522 	int tail = req->cryptlen % AES_BLOCK_SIZE;
523 	struct scatterlist sg_src[2], sg_dst[2];
524 	struct skcipher_request subreq;
525 	struct scatterlist *src, *dst;
526 	struct skcipher_walk walk;
527 
528 	if (req->cryptlen < AES_BLOCK_SIZE)
529 		return -EINVAL;
530 
531 	err = skcipher_walk_virt(&walk, req, false);
532 
533 	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
534 		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
535 					      AES_BLOCK_SIZE) - 2;
536 
537 		skcipher_walk_abort(&walk);
538 
539 		skcipher_request_set_tfm(&subreq, tfm);
540 		skcipher_request_set_callback(&subreq,
541 					      skcipher_request_flags(req),
542 					      NULL, NULL);
543 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
544 					   xts_blocks * AES_BLOCK_SIZE,
545 					   req->iv);
546 		req = &subreq;
547 		err = skcipher_walk_virt(&walk, req, false);
548 	} else {
549 		tail = 0;
550 	}
551 
552 	scoped_ksimd() {
553 		for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
554 			int nbytes = walk.nbytes;
555 
556 			if (walk.nbytes < walk.total)
557 				nbytes &= ~(AES_BLOCK_SIZE - 1);
558 
559 			aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
560 					ctx->key1.key_enc, rounds, nbytes,
561 					ctx->key2.key_enc, walk.iv, first);
562 			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
563 		}
564 
565 		if (err || likely(!tail))
566 			return err;
567 
568 		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
569 		if (req->dst != req->src)
570 			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
571 
572 		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
573 					   req->iv);
574 
575 		err = skcipher_walk_virt(&walk, &subreq, false);
576 		if (err)
577 			return err;
578 
579 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
580 				ctx->key1.key_enc, rounds, walk.nbytes,
581 				ctx->key2.key_enc, walk.iv, first);
582 	}
583 	return skcipher_walk_done(&walk, 0);
584 }
585 
586 static int __maybe_unused xts_decrypt(struct skcipher_request *req)
587 {
588 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
589 	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
590 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
591 	int tail = req->cryptlen % AES_BLOCK_SIZE;
592 	struct scatterlist sg_src[2], sg_dst[2];
593 	struct skcipher_request subreq;
594 	struct scatterlist *src, *dst;
595 	struct skcipher_walk walk;
596 
597 	if (req->cryptlen < AES_BLOCK_SIZE)
598 		return -EINVAL;
599 
600 	err = skcipher_walk_virt(&walk, req, false);
601 
602 	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
603 		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
604 					      AES_BLOCK_SIZE) - 2;
605 
606 		skcipher_walk_abort(&walk);
607 
608 		skcipher_request_set_tfm(&subreq, tfm);
609 		skcipher_request_set_callback(&subreq,
610 					      skcipher_request_flags(req),
611 					      NULL, NULL);
612 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
613 					   xts_blocks * AES_BLOCK_SIZE,
614 					   req->iv);
615 		req = &subreq;
616 		err = skcipher_walk_virt(&walk, req, false);
617 	} else {
618 		tail = 0;
619 	}
620 
621 	scoped_ksimd() {
622 		for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
623 			int nbytes = walk.nbytes;
624 
625 			if (walk.nbytes < walk.total)
626 				nbytes &= ~(AES_BLOCK_SIZE - 1);
627 
628 			aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
629 					ctx->key1.key_dec, rounds, nbytes,
630 					ctx->key2.key_enc, walk.iv, first);
631 			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
632 		}
633 
634 		if (err || likely(!tail))
635 			return err;
636 
637 		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
638 		if (req->dst != req->src)
639 			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
640 
641 		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
642 					   req->iv);
643 
644 		err = skcipher_walk_virt(&walk, &subreq, false);
645 		if (err)
646 			return err;
647 
648 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
649 				ctx->key1.key_dec, rounds, walk.nbytes,
650 				ctx->key2.key_enc, walk.iv, first);
651 	}
652 	return skcipher_walk_done(&walk, 0);
653 }
654 
655 static struct skcipher_alg aes_algs[] = { {
656 #if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
657 	.base = {
658 		.cra_name		= "ecb(aes)",
659 		.cra_driver_name	= "ecb-aes-" MODE,
660 		.cra_priority		= PRIO,
661 		.cra_blocksize		= AES_BLOCK_SIZE,
662 		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
663 		.cra_module		= THIS_MODULE,
664 	},
665 	.min_keysize	= AES_MIN_KEY_SIZE,
666 	.max_keysize	= AES_MAX_KEY_SIZE,
667 	.setkey		= skcipher_aes_setkey,
668 	.encrypt	= ecb_encrypt,
669 	.decrypt	= ecb_decrypt,
670 }, {
671 	.base = {
672 		.cra_name		= "cbc(aes)",
673 		.cra_driver_name	= "cbc-aes-" MODE,
674 		.cra_priority		= PRIO,
675 		.cra_blocksize		= AES_BLOCK_SIZE,
676 		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
677 		.cra_module		= THIS_MODULE,
678 	},
679 	.min_keysize	= AES_MIN_KEY_SIZE,
680 	.max_keysize	= AES_MAX_KEY_SIZE,
681 	.ivsize		= AES_BLOCK_SIZE,
682 	.setkey		= skcipher_aes_setkey,
683 	.encrypt	= cbc_encrypt,
684 	.decrypt	= cbc_decrypt,
685 }, {
686 	.base = {
687 		.cra_name		= "ctr(aes)",
688 		.cra_driver_name	= "ctr-aes-" MODE,
689 		.cra_priority		= PRIO,
690 		.cra_blocksize		= 1,
691 		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
692 		.cra_module		= THIS_MODULE,
693 	},
694 	.min_keysize	= AES_MIN_KEY_SIZE,
695 	.max_keysize	= AES_MAX_KEY_SIZE,
696 	.ivsize		= AES_BLOCK_SIZE,
697 	.chunksize	= AES_BLOCK_SIZE,
698 	.setkey		= skcipher_aes_setkey,
699 	.encrypt	= ctr_encrypt,
700 	.decrypt	= ctr_encrypt,
701 }, {
702 	.base = {
703 		.cra_name		= "xctr(aes)",
704 		.cra_driver_name	= "xctr-aes-" MODE,
705 		.cra_priority		= PRIO,
706 		.cra_blocksize		= 1,
707 		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
708 		.cra_module		= THIS_MODULE,
709 	},
710 	.min_keysize	= AES_MIN_KEY_SIZE,
711 	.max_keysize	= AES_MAX_KEY_SIZE,
712 	.ivsize		= AES_BLOCK_SIZE,
713 	.chunksize	= AES_BLOCK_SIZE,
714 	.setkey		= skcipher_aes_setkey,
715 	.encrypt	= xctr_encrypt,
716 	.decrypt	= xctr_encrypt,
717 }, {
718 	.base = {
719 		.cra_name		= "xts(aes)",
720 		.cra_driver_name	= "xts-aes-" MODE,
721 		.cra_priority		= PRIO,
722 		.cra_blocksize		= AES_BLOCK_SIZE,
723 		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
724 		.cra_module		= THIS_MODULE,
725 	},
726 	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
727 	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
728 	.ivsize		= AES_BLOCK_SIZE,
729 	.walksize	= 2 * AES_BLOCK_SIZE,
730 	.setkey		= xts_set_key,
731 	.encrypt	= xts_encrypt,
732 	.decrypt	= xts_decrypt,
733 }, {
734 #endif
735 	.base = {
736 		.cra_name		= "cts(cbc(aes))",
737 		.cra_driver_name	= "cts-cbc-aes-" MODE,
738 		.cra_priority		= PRIO,
739 		.cra_blocksize		= AES_BLOCK_SIZE,
740 		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
741 		.cra_module		= THIS_MODULE,
742 	},
743 	.min_keysize	= AES_MIN_KEY_SIZE,
744 	.max_keysize	= AES_MAX_KEY_SIZE,
745 	.ivsize		= AES_BLOCK_SIZE,
746 	.walksize	= 2 * AES_BLOCK_SIZE,
747 	.setkey		= skcipher_aes_setkey,
748 	.encrypt	= cts_cbc_encrypt,
749 	.decrypt	= cts_cbc_decrypt,
750 }, {
751 	.base = {
752 		.cra_name		= "essiv(cbc(aes),sha256)",
753 		.cra_driver_name	= "essiv-cbc-aes-sha256-" MODE,
754 		.cra_priority		= PRIO + 1,
755 		.cra_blocksize		= AES_BLOCK_SIZE,
756 		.cra_ctxsize		= sizeof(struct crypto_aes_essiv_cbc_ctx),
757 		.cra_module		= THIS_MODULE,
758 	},
759 	.min_keysize	= AES_MIN_KEY_SIZE,
760 	.max_keysize	= AES_MAX_KEY_SIZE,
761 	.ivsize		= AES_BLOCK_SIZE,
762 	.setkey		= essiv_cbc_set_key,
763 	.encrypt	= essiv_cbc_encrypt,
764 	.decrypt	= essiv_cbc_decrypt,
765 } };
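/*
 * Illustrative use only (not part of this driver, request allocation and
 * error handling omitted): callers reach these implementations through the
 * generic skcipher API, e.g.
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 2 * AES_KEYSIZE_256);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	crypto_skcipher_encrypt(req);
 *
 * with "xts-aes-" MODE reported as the cra_driver_name when this module wins
 * the priority comparison.
 */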
766 
767 static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
768 			 unsigned int key_len)
769 {
770 	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
771 
772 	return aes_expandkey(&ctx->key, in_key, key_len);
773 }
774 
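/*
 * Doubling in GF(2^128) as used for CMAC subkey generation: shift the
 * 128-bit value left by one bit and, if the bit shifted out was set, fold it
 * back in by XORing the low byte with 0x87 (the reduction for
 * x^128 + x^7 + x^2 + x + 1).
 */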
775 static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
776 {
777 	u64 a = be64_to_cpu(x->a);
778 	u64 b = be64_to_cpu(x->b);
779 
780 	y->a = cpu_to_be64((a << 1) | (b >> 63));
781 	y->b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
782 }
783 
784 static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
785 		       unsigned int key_len)
786 {
787 	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
788 	be128 *consts = (be128 *)ctx->consts;
789 	int rounds = 6 + key_len / 4;
790 	int err;
791 
792 	err = cbcmac_setkey(tfm, in_key, key_len);
793 	if (err)
794 		return err;
795 
796 	/* encrypt the zero vector */
797 	scoped_ksimd()
798 		aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){},
799 				ctx->key.key_enc, rounds, 1);
800 
801 	cmac_gf128_mul_by_x(consts, consts);
802 	cmac_gf128_mul_by_x(consts + 1, consts);
803 
804 	return 0;
805 }
806 
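/*
 * XCBC-MAC (RFC 3566) derives three values from the user key K:
 * K1 = E_K(0x01..01) replaces K as the actual CBC-MAC key, while
 * K2 = E_K(0x02..02) and K3 = E_K(0x03..03) are stored in consts[] and
 * XORed into the final block, mirroring the CMAC subkeys.
 */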
807 static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
808 		       unsigned int key_len)
809 {
810 	static u8 const ks[3][AES_BLOCK_SIZE] = {
811 		{ [0 ... AES_BLOCK_SIZE - 1] = 0x1 },
812 		{ [0 ... AES_BLOCK_SIZE - 1] = 0x2 },
813 		{ [0 ... AES_BLOCK_SIZE - 1] = 0x3 },
814 	};
815 
816 	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
817 	int rounds = 6 + key_len / 4;
818 	u8 key[AES_BLOCK_SIZE];
819 	int err;
820 
821 	err = cbcmac_setkey(tfm, in_key, key_len);
822 	if (err)
823 		return err;
824 
825 	scoped_ksimd() {
826 		aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
827 		aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
828 	}
829 
830 	return cbcmac_setkey(tfm, key, sizeof(key));
831 }
832 
833 static int mac_init(struct shash_desc *desc)
834 {
835 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
836 
837 	memset(ctx->dg, 0, AES_BLOCK_SIZE);
838 	return 0;
839 }
840 
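/*
 * aes_mac_update() may process fewer blocks than requested (e.g. so the NEON
 * unit can be given up when preemption is due) and returns the number of
 * blocks it did not handle, so keep calling it until everything has been
 * consumed.
 */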
841 static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
842 			  u8 dg[], int enc_before)
843 {
844 	int rounds = 6 + ctx->key_length / 4;
845 	int rem;
846 
847 	do {
848 		scoped_ksimd()
849 			rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
850 					     dg, enc_before, !enc_before);
851 		in += (blocks - rem) * AES_BLOCK_SIZE;
852 		blocks = rem;
853 	} while (blocks);
854 }
855 
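/*
 * With CRYPTO_AHASH_ALG_BLOCK_ONLY set on the algorithms below, ->update()
 * is allowed to leave a partial trailing block unprocessed and report it via
 * its return value; the shash core then appears to buffer those bytes and
 * hand them back through ->finup(), which is why this returns len rather
 * than always 0.
 */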
856 static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
857 {
858 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
859 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
860 	int blocks = len / AES_BLOCK_SIZE;
861 
862 	len %= AES_BLOCK_SIZE;
863 	mac_do_update(&tctx->key, p, blocks, ctx->dg, 0);
864 	return len;
865 }
866 
867 static int cbcmac_finup(struct shash_desc *desc, const u8 *src,
868 			unsigned int len, u8 *out)
869 {
870 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
871 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
872 
873 	if (len) {
874 		crypto_xor(ctx->dg, src, len);
875 		mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1);
876 	}
877 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
878 	return 0;
879 }
880 
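/*
 * CMAC finalization: a complete final block is XORed with the first stored
 * subkey (K1); an incomplete one is padded with 0x80 followed by zeroes, per
 * the CMAC spec, and XORed with the second subkey at consts + AES_BLOCK_SIZE.
 * The same routine serves xcbc(aes), whose K2/K3 occupy the same slots.
 */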
881 static int cmac_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
882 		      u8 *out)
883 {
884 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
885 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
886 	u8 *consts = tctx->consts;
887 
888 	crypto_xor(ctx->dg, src, len);
889 	if (len != AES_BLOCK_SIZE) {
890 		ctx->dg[len] ^= 0x80;
891 		consts += AES_BLOCK_SIZE;
892 	}
893 	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0);
894 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
895 	return 0;
896 }
897 
898 static struct shash_alg mac_algs[] = { {
899 	.base.cra_name		= "cmac(aes)",
900 	.base.cra_driver_name	= "cmac-aes-" MODE,
901 	.base.cra_priority	= PRIO,
902 	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
903 				  CRYPTO_AHASH_ALG_FINAL_NONZERO,
904 	.base.cra_blocksize	= AES_BLOCK_SIZE,
905 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
906 				  2 * AES_BLOCK_SIZE,
907 	.base.cra_module	= THIS_MODULE,
908 
909 	.digestsize		= AES_BLOCK_SIZE,
910 	.init			= mac_init,
911 	.update			= mac_update,
912 	.finup			= cmac_finup,
913 	.setkey			= cmac_setkey,
914 	.descsize		= sizeof(struct mac_desc_ctx),
915 }, {
916 	.base.cra_name		= "xcbc(aes)",
917 	.base.cra_driver_name	= "xcbc-aes-" MODE,
918 	.base.cra_priority	= PRIO,
919 	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
920 				  CRYPTO_AHASH_ALG_FINAL_NONZERO,
921 	.base.cra_blocksize	= AES_BLOCK_SIZE,
922 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
923 				  2 * AES_BLOCK_SIZE,
924 	.base.cra_module	= THIS_MODULE,
925 
926 	.digestsize		= AES_BLOCK_SIZE,
927 	.init			= mac_init,
928 	.update			= mac_update,
929 	.finup			= cmac_finup,
930 	.setkey			= xcbc_setkey,
931 	.descsize		= sizeof(struct mac_desc_ctx),
932 }, {
933 	.base.cra_name		= "cbcmac(aes)",
934 	.base.cra_driver_name	= "cbcmac-aes-" MODE,
935 	.base.cra_priority	= PRIO,
936 	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY,
937 	.base.cra_blocksize	= AES_BLOCK_SIZE,
938 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx),
939 	.base.cra_module	= THIS_MODULE,
940 
941 	.digestsize		= AES_BLOCK_SIZE,
942 	.init			= mac_init,
943 	.update			= mac_update,
944 	.finup			= cbcmac_finup,
945 	.setkey			= cbcmac_setkey,
946 	.descsize		= sizeof(struct mac_desc_ctx),
947 } };
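/*
 * Illustrative use only: the MACs are consumed through the shash API, e.g.
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_tfm_digest(tfm, data, len, out);
 *
 * cbcmac(aes) is mainly used as a building block for ccm(aes).
 */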
948 
949 static void aes_exit(void)
950 {
951 	crypto_unregister_shashes(mac_algs, ARRAY_SIZE(mac_algs));
952 	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
953 }
954 
955 static int __init aes_init(void)
956 {
957 	int err;
958 
959 	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
960 	if (err)
961 		return err;
962 
963 	err = crypto_register_shashes(mac_algs, ARRAY_SIZE(mac_algs));
964 	if (err)
965 		goto unregister_ciphers;
966 
967 	return 0;
968 
969 unregister_ciphers:
970 	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
971 	return err;
972 }
973 
974 #ifdef USE_V8_CRYPTO_EXTENSIONS
975 module_cpu_feature_match(AES, aes_init);
976 EXPORT_SYMBOL_NS(ce_aes_mac_update, "CRYPTO_INTERNAL");
977 #else
978 module_init(aes_init);
979 EXPORT_SYMBOL(neon_aes_ecb_encrypt);
980 EXPORT_SYMBOL(neon_aes_cbc_encrypt);
981 EXPORT_SYMBOL(neon_aes_ctr_encrypt);
982 EXPORT_SYMBOL(neon_aes_xts_encrypt);
983 EXPORT_SYMBOL(neon_aes_xts_decrypt);
984 #endif
985 module_exit(aes_exit);
986