// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

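/*
 * The aesbs_*() routines below are implemented in assembly
 * (aes-neonbs-core.S) and operate on 8 blocks at a time in the
 * bit-sliced representation.
 */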
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

/*
 * Non-bit-sliced fallback routines, borrowed from aes-neon-blk.ko. These
 * cover what the 8-way bit-sliced code cannot handle: CBC encryption,
 * which is inherently sequential, and the sub-8-block tails of CTR and
 * XTS requests.
 */
asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				     int rounds, int blocks);
asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				     int rounds, int blocks, u8 iv[]);
asmlinkage void neon_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				     int rounds, int bytes, u8 ctr[]);
asmlinkage void neon_aes_xts_encrypt(u8 out[], u8 const in[],
				     u32 const rk1[], int rounds, int bytes,
				     u32 const rk2[], u8 iv[], int first);
asmlinkage void neon_aes_xts_decrypt(u8 out[], u8 const in[],
				     u32 const rk1[], int rounds, int bytes,
				     u32 const rk2[], u8 iv[], int first);

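/*
 * Bit-sliced key schedule: up to 13 round keys, each expanded across
 * 8 parallel blocks, plus a 32 byte tail. The exact layout is defined
 * by aesbs_convert_key() in aes-neonbs-core.S.
 */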
struct aesbs_ctx {
	u8	rk[13 * (8 * AES_BLOCK_SIZE) + 32];
	int	rounds;
} __aligned(AES_BLOCK_SIZE);

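/*
 * CBC and CTR share a context: the bit-sliced schedule for the
 * parallelizable paths, plus the unsliced encryption schedule for the
 * sequential and tail fallbacks.
 */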
struct aesbs_cbc_ctr_ctx {
	struct aesbs_ctx	key;
	u32			enc[AES_MAX_KEYLENGTH_U32];
};

struct aesbs_xts_ctx {
	struct aesbs_ctx	key;
	u32			twkey[AES_MAX_KEYLENGTH_U32];
	struct crypto_aes_ctx	cts;
};

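/*
 * Key expansion uses the generic helper; the temporary schedule lives on
 * the heap and is wiped with kfree_sensitive() once it has been converted
 * to the bit-sliced format.
 */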
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			unsigned int key_len)
{
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx *rk;
	int err;

	rk = kmalloc(sizeof(*rk), GFP_KERNEL);
	if (!rk)
		return -ENOMEM;

	err = aes_expandkey(rk, in_key, key_len);
	if (err)
		goto out;

	/* 10/12/14 rounds for 128/192/256-bit keys */
	ctx->rounds = 6 + key_len / 4;

	scoped_ksimd()
		aesbs_convert_key(ctx->rk, rk->key_enc, ctx->rounds);
out:
	kfree_sensitive(rk);
	return err;
}

static int __ecb_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		/*
		 * Unless this is the final chunk, only process whole
		 * strides (8 blocks) so the bit-sliced code runs at
		 * full width.
		 */
		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		scoped_ksimd()
			fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
			   ctx->rounds, blocks);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}

	return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_decrypt);
}

static int aesbs_cbc_ctr_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx *rk;
	int err;

	rk = kmalloc(sizeof(*rk), GFP_KERNEL);
	if (!rk)
		return -ENOMEM;

	err = aes_expandkey(rk, in_key, key_len);
	if (err)
		goto out;

	ctx->key.rounds = 6 + key_len / 4;

	/* keep the unsliced schedule for the non-bit-sliced fallbacks */
	memcpy(ctx->enc, rk->key_enc, sizeof(ctx->enc));

	scoped_ksimd()
		aesbs_convert_key(ctx->key.rk, rk->key_enc, ctx->key.rounds);
out:
	kfree_sensitive(rk);
	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		/*
		 * CBC encryption chains each block into the next, so it
		 * cannot be bit-sliced; fall back to the non-bitsliced
		 * NEON implementation.
		 */
		scoped_ksimd()
			neon_aes_cbc_encrypt(walk.dst.virt.addr,
					     walk.src.virt.addr,
					     ctx->enc, ctx->key.rounds, blocks,
					     walk.iv);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

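/*
 * CBC decryption has no inter-block dependency on the plaintext side,
 * so it can go through the 8-way bit-sliced core.
 */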
static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		scoped_ksimd()
			aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
					  ctx->key.rk, ctx->key.rounds, blocks,
					  walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}

	return err;
}

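/*
 * CTR: full 8-block batches go through the bit-sliced core; whatever
 * remains at the end of the request, including a partial final block,
 * is handled by the plain NEON implementation in a single call.
 */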
static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
		int nbytes = walk.nbytes % (8 * AES_BLOCK_SIZE);
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		scoped_ksimd() {
			if (blocks >= 8) {
				aesbs_ctr_encrypt(dst, src, ctx->key.rk,
						  ctx->key.rounds, blocks,
						  walk.iv);
				dst += blocks * AES_BLOCK_SIZE;
				src += blocks * AES_BLOCK_SIZE;
			}
			if (nbytes && walk.nbytes == walk.total) {
				u8 buf[AES_BLOCK_SIZE];
				u8 *d = dst;

				/*
				 * Stage a partial final block at the end
				 * of a stack buffer so the NEON routine
				 * can safely operate on a whole block.
				 */
				if (unlikely(nbytes < AES_BLOCK_SIZE))
					src = dst = memcpy(buf + sizeof(buf) -
							   nbytes, src, nbytes);

				neon_aes_ctr_encrypt(dst, src, ctx->enc,
						     ctx->key.rounds, nbytes,
						     walk.iv);

				if (unlikely(nbytes < AES_BLOCK_SIZE))
					memcpy(d, dst, nbytes);

				nbytes = 0;
			}
		}
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}

static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = xts_verify_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* the first half of the key encrypts the data ... */
	key_len /= 2;
	err = aes_expandkey(&ctx->cts, in_key, key_len);
	if (err)
		return err;

	/* ... and the second half generates the tweak */
	err = aes_expandkey(&rk, in_key + key_len, key_len);
	if (err)
		return err;

	memcpy(ctx->twkey, rk.key_enc, sizeof(ctx->twkey));

	return aesbs_setkey(tfm, in_key, key_len);
}

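/*
 * XTS: full 8-block spans go through the bit-sliced core; the final
 * partial span, including any ciphertext stealing tail, is handled by
 * the plain NEON helpers.
 */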
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int tail = req->cryptlen % (8 * AES_BLOCK_SIZE);
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;
	int nbytes, err;
	int first = 1;
	const u8 *in;
	u8 *out;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	/* ensure that the cts tail is covered by a single step */
	if (unlikely(tail > 0 && tail < AES_BLOCK_SIZE)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
	} else {
		tail = 0;
	}

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	scoped_ksimd() {
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
			out = walk.dst.virt.addr;
			in = walk.src.virt.addr;
			nbytes = walk.nbytes;

			if (blocks >= 8) {
				/* encrypt the IV to produce the initial tweak */
				if (first == 1)
					neon_aes_ecb_encrypt(walk.iv, walk.iv,
							     ctx->twkey,
							     ctx->key.rounds, 1);
				first = 2;

				fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,
				   walk.iv);

				out += blocks * AES_BLOCK_SIZE;
				in += blocks * AES_BLOCK_SIZE;
				nbytes -= blocks * AES_BLOCK_SIZE;
			}
			if (walk.nbytes == walk.total && nbytes > 0) {
				if (encrypt)
					neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
							     ctx->key.rounds, nbytes,
							     ctx->twkey, walk.iv, first);
				else
					neon_aes_xts_decrypt(out, in, ctx->cts.key_dec,
							     ctx->key.rounds, nbytes,
							     ctx->twkey, walk.iv, first);
				nbytes = first = 0;
			}
			err = skcipher_walk_done(&walk, nbytes);
		}

		if (err || likely(!tail))
			return err;

		/* handle ciphertext stealing */
		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
					   req->iv);

		err = skcipher_walk_virt(&walk, req, false);
		if (err)
			return err;

		out = walk.dst.virt.addr;
		in = walk.src.virt.addr;
		nbytes = walk.nbytes;

		if (encrypt)
			neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
					     ctx->key.rounds, nbytes, ctx->twkey,
					     walk.iv, first);
		else
			neon_aes_xts_decrypt(out, in, ctx->cts.key_dec,
					     ctx->key.rounds, nbytes, ctx->twkey,
					     walk.iv, first);
	}

	return skcipher_walk_done(&walk, 0);
}

static int xts_encrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, true, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, false, aesbs_xts_decrypt);
}

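/*
 * Priority 250 places these above the generic C implementation but
 * below the ARMv8 Crypto Extensions driver, so the bit-sliced code is
 * only selected where the CE instructions are unavailable.
 */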
static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctr_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_cbc_ctr_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctr_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_cbc_ctr_setkey,
	.encrypt		= ctr_encrypt,
	/* CTR decryption is the same operation as encryption */
	.decrypt		= ctr_encrypt,
}, {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_xts_setkey,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };

static void aes_exit(void)
{
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	/* the bit-sliced code requires NEON (Advanced SIMD) */
	if (!cpu_have_named_feature(ASIMD))
		return -ENODEV;

	return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

module_init(aes_init);
module_exit(aes_exit);