xref: /linux/arch/x86/crypto/aesni-intel_glue.c (revision 2f0a7504530c24f55daec7d2364d933bb1a1fa68)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for AES-NI and VAES instructions.  This file contains glue code.
4  * The real AES implementations are in aesni-intel_asm.S and other .S files.
5  *
6  * Copyright (C) 2008, Intel Corp.
7  *    Author: Huang Ying <ying.huang@intel.com>
8  *
9  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
10  * interface for 64-bit kernels.
11  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
12  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
13  *             Tadeusz Struk (tadeusz.struk@intel.com)
14  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
15  *    Copyright (c) 2010, Intel Corporation.
16  *
17  * Copyright 2024 Google LLC
18  */
19 
20 #include <linux/hardirq.h>
21 #include <linux/types.h>
22 #include <linux/module.h>
23 #include <linux/err.h>
24 #include <crypto/algapi.h>
25 #include <crypto/aes.h>
26 #include <crypto/b128ops.h>
27 #include <crypto/gcm.h>
28 #include <crypto/xts.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/simd.h>
31 #include <crypto/scatterwalk.h>
32 #include <crypto/internal/aead.h>
33 #include <crypto/internal/simd.h>
34 #include <crypto/internal/skcipher.h>
35 #include <linux/jump_label.h>
36 #include <linux/workqueue.h>
37 #include <linux/spinlock.h>
38 #include <linux/static_call.h>
39 
40 
41 #define AESNI_ALIGN	16
42 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
43 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
44 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
45 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
46 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
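
/*
 * The crypto API only guarantees CRYPTO_MINALIGN alignment for tfm contexts,
 * so the context sizes above include AESNI_ALIGN_EXTRA bytes of slack.  That
 * slack is what lets aes_align_addr() below round the context pointer up to
 * the 16-byte boundary required by the assembly code.
 */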
47 
48 struct aesni_xts_ctx {
49 	struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
50 	struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
51 };
52 
53 static inline void *aes_align_addr(void *addr)
54 {
55 	if (crypto_tfm_ctx_alignment() >= AESNI_ALIGN)
56 		return addr;
57 	return PTR_ALIGN(addr, AESNI_ALIGN);
58 }
59 
60 asmlinkage void aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
61 			      unsigned int key_len);
62 asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
63 asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
64 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
65 			      const u8 *in, unsigned int len);
66 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
67 			      const u8 *in, unsigned int len);
68 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
69 			      const u8 *in, unsigned int len, u8 *iv);
70 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
71 			      const u8 *in, unsigned int len, u8 *iv);
72 asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
73 				  const u8 *in, unsigned int len, u8 *iv);
74 asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
75 				  const u8 *in, unsigned int len, u8 *iv);
76 
77 asmlinkage void aesni_xts_enc(const struct crypto_aes_ctx *ctx, u8 *out,
78 			      const u8 *in, unsigned int len, u8 *iv);
79 
80 asmlinkage void aesni_xts_dec(const struct crypto_aes_ctx *ctx, u8 *out,
81 			      const u8 *in, unsigned int len, u8 *iv);
82 
83 #ifdef CONFIG_X86_64
84 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
85 			      const u8 *in, unsigned int len, u8 *iv);
86 #endif
87 
88 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
89 {
90 	return aes_align_addr(raw_ctx);
91 }
92 
93 static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
94 {
95 	return aes_align_addr(crypto_skcipher_ctx(tfm));
96 }
97 
98 static int aes_set_key_common(struct crypto_aes_ctx *ctx,
99 			      const u8 *in_key, unsigned int key_len)
100 {
101 	int err;
102 
103 	if (!crypto_simd_usable())
104 		return aes_expandkey(ctx, in_key, key_len);
105 
106 	err = aes_check_keylen(key_len);
107 	if (err)
108 		return err;
109 
110 	kernel_fpu_begin();
111 	aesni_set_key(ctx, in_key, key_len);
112 	kernel_fpu_end();
113 	return 0;
114 }
115 
116 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
117 		       unsigned int key_len)
118 {
119 	return aes_set_key_common(aes_ctx(crypto_tfm_ctx(tfm)), in_key,
120 				  key_len);
121 }
122 
123 static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
124 {
125 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
126 
127 	if (!crypto_simd_usable()) {
128 		aes_encrypt(ctx, dst, src);
129 	} else {
130 		kernel_fpu_begin();
131 		aesni_enc(ctx, dst, src);
132 		kernel_fpu_end();
133 	}
134 }
135 
136 static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
137 {
138 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
139 
140 	if (!crypto_simd_usable()) {
141 		aes_decrypt(ctx, dst, src);
142 	} else {
143 		kernel_fpu_begin();
144 		aesni_dec(ctx, dst, src);
145 		kernel_fpu_end();
146 	}
147 }
148 
149 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
150 			         unsigned int len)
151 {
152 	return aes_set_key_common(aes_ctx(crypto_skcipher_ctx(tfm)), key, len);
153 }
154 
155 static int ecb_encrypt(struct skcipher_request *req)
156 {
157 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
158 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
159 	struct skcipher_walk walk;
160 	unsigned int nbytes;
161 	int err;
162 
163 	err = skcipher_walk_virt(&walk, req, false);
164 
165 	while ((nbytes = walk.nbytes)) {
166 		kernel_fpu_begin();
167 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
168 			      nbytes & AES_BLOCK_MASK);
169 		kernel_fpu_end();
170 		nbytes &= AES_BLOCK_SIZE - 1;
171 		err = skcipher_walk_done(&walk, nbytes);
172 	}
173 
174 	return err;
175 }
176 
177 static int ecb_decrypt(struct skcipher_request *req)
178 {
179 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
180 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
181 	struct skcipher_walk walk;
182 	unsigned int nbytes;
183 	int err;
184 
185 	err = skcipher_walk_virt(&walk, req, false);
186 
187 	while ((nbytes = walk.nbytes)) {
188 		kernel_fpu_begin();
189 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
190 			      nbytes & AES_BLOCK_MASK);
191 		kernel_fpu_end();
192 		nbytes &= AES_BLOCK_SIZE - 1;
193 		err = skcipher_walk_done(&walk, nbytes);
194 	}
195 
196 	return err;
197 }
198 
199 static int cbc_encrypt(struct skcipher_request *req)
200 {
201 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
202 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
203 	struct skcipher_walk walk;
204 	unsigned int nbytes;
205 	int err;
206 
207 	err = skcipher_walk_virt(&walk, req, false);
208 
209 	while ((nbytes = walk.nbytes)) {
210 		kernel_fpu_begin();
211 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
212 			      nbytes & AES_BLOCK_MASK, walk.iv);
213 		kernel_fpu_end();
214 		nbytes &= AES_BLOCK_SIZE - 1;
215 		err = skcipher_walk_done(&walk, nbytes);
216 	}
217 
218 	return err;
219 }
220 
221 static int cbc_decrypt(struct skcipher_request *req)
222 {
223 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
224 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
225 	struct skcipher_walk walk;
226 	unsigned int nbytes;
227 	int err;
228 
229 	err = skcipher_walk_virt(&walk, req, false);
230 
231 	while ((nbytes = walk.nbytes)) {
232 		kernel_fpu_begin();
233 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
234 			      nbytes & AES_BLOCK_MASK, walk.iv);
235 		kernel_fpu_end();
236 		nbytes &= AES_BLOCK_SIZE - 1;
237 		err = skcipher_walk_done(&walk, nbytes);
238 	}
239 
240 	return err;
241 }
242 
243 static int cts_cbc_encrypt(struct skcipher_request *req)
244 {
245 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
246 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
247 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
248 	struct scatterlist *src = req->src, *dst = req->dst;
249 	struct scatterlist sg_src[2], sg_dst[2];
250 	struct skcipher_request subreq;
251 	struct skcipher_walk walk;
252 	int err;
253 
254 	skcipher_request_set_tfm(&subreq, tfm);
255 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
256 				      NULL, NULL);
257 
258 	if (req->cryptlen <= AES_BLOCK_SIZE) {
259 		if (req->cryptlen < AES_BLOCK_SIZE)
260 			return -EINVAL;
261 		cbc_blocks = 1;
262 	}
263 
264 	if (cbc_blocks > 0) {
265 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
266 					   cbc_blocks * AES_BLOCK_SIZE,
267 					   req->iv);
268 
269 		err = cbc_encrypt(&subreq);
270 		if (err)
271 			return err;
272 
273 		if (req->cryptlen == AES_BLOCK_SIZE)
274 			return 0;
275 
276 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
277 		if (req->dst != req->src)
278 			dst = scatterwalk_ffwd(sg_dst, req->dst,
279 					       subreq.cryptlen);
280 	}
281 
282 	/* handle ciphertext stealing */
283 	skcipher_request_set_crypt(&subreq, src, dst,
284 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
285 				   req->iv);
286 
287 	err = skcipher_walk_virt(&walk, &subreq, false);
288 	if (err)
289 		return err;
290 
291 	kernel_fpu_begin();
292 	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
293 			  walk.nbytes, walk.iv);
294 	kernel_fpu_end();
295 
296 	return skcipher_walk_done(&walk, 0);
297 }
298 
299 static int cts_cbc_decrypt(struct skcipher_request *req)
300 {
301 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
302 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
303 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
304 	struct scatterlist *src = req->src, *dst = req->dst;
305 	struct scatterlist sg_src[2], sg_dst[2];
306 	struct skcipher_request subreq;
307 	struct skcipher_walk walk;
308 	int err;
309 
310 	skcipher_request_set_tfm(&subreq, tfm);
311 	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
312 				      NULL, NULL);
313 
314 	if (req->cryptlen <= AES_BLOCK_SIZE) {
315 		if (req->cryptlen < AES_BLOCK_SIZE)
316 			return -EINVAL;
317 		cbc_blocks = 1;
318 	}
319 
320 	if (cbc_blocks > 0) {
321 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
322 					   cbc_blocks * AES_BLOCK_SIZE,
323 					   req->iv);
324 
325 		err = cbc_decrypt(&subreq);
326 		if (err)
327 			return err;
328 
329 		if (req->cryptlen == AES_BLOCK_SIZE)
330 			return 0;
331 
332 		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
333 		if (req->dst != req->src)
334 			dst = scatterwalk_ffwd(sg_dst, req->dst,
335 					       subreq.cryptlen);
336 	}
337 
338 	/* handle ciphertext stealing */
339 	skcipher_request_set_crypt(&subreq, src, dst,
340 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
341 				   req->iv);
342 
343 	err = skcipher_walk_virt(&walk, &subreq, false);
344 	if (err)
345 		return err;
346 
347 	kernel_fpu_begin();
348 	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
349 			  walk.nbytes, walk.iv);
350 	kernel_fpu_end();
351 
352 	return skcipher_walk_done(&walk, 0);
353 }
354 
355 #ifdef CONFIG_X86_64
356 /* This is the non-AVX version. */
357 static int ctr_crypt_aesni(struct skcipher_request *req)
358 {
359 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
360 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
361 	u8 keystream[AES_BLOCK_SIZE];
362 	struct skcipher_walk walk;
363 	unsigned int nbytes;
364 	int err;
365 
366 	err = skcipher_walk_virt(&walk, req, false);
367 
368 	while ((nbytes = walk.nbytes) > 0) {
369 		kernel_fpu_begin();
370 		if (nbytes & AES_BLOCK_MASK)
371 			aesni_ctr_enc(ctx, walk.dst.virt.addr,
372 				      walk.src.virt.addr,
373 				      nbytes & AES_BLOCK_MASK, walk.iv);
374 		nbytes &= ~AES_BLOCK_MASK;
375 
376 		if (walk.nbytes == walk.total && nbytes > 0) {
377 			aesni_enc(ctx, keystream, walk.iv);
378 			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
379 				       walk.src.virt.addr + walk.nbytes - nbytes,
380 				       keystream, nbytes);
381 			crypto_inc(walk.iv, AES_BLOCK_SIZE);
382 			nbytes = 0;
383 		}
384 		kernel_fpu_end();
385 		err = skcipher_walk_done(&walk, nbytes);
386 	}
387 	return err;
388 }
389 #endif
390 
391 static int xts_setkey_aesni(struct crypto_skcipher *tfm, const u8 *key,
392 			    unsigned int keylen)
393 {
394 	struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
395 	int err;
396 
397 	err = xts_verify_key(tfm, key, keylen);
398 	if (err)
399 		return err;
400 
401 	keylen /= 2;
402 
403 	/* first half of xts-key is for crypt */
404 	err = aes_set_key_common(&ctx->crypt_ctx, key, keylen);
405 	if (err)
406 		return err;
407 
408 	/* second half of xts-key is for tweak */
409 	return aes_set_key_common(&ctx->tweak_ctx, key + keylen, keylen);
410 }
411 
412 typedef void (*xts_encrypt_iv_func)(const struct crypto_aes_ctx *tweak_key,
413 				    u8 iv[AES_BLOCK_SIZE]);
414 typedef void (*xts_crypt_func)(const struct crypto_aes_ctx *key,
415 			       const u8 *src, u8 *dst, int len,
416 			       u8 tweak[AES_BLOCK_SIZE]);
417 
418 /* This handles cases where the source and/or destination span pages. */
419 static noinline int
420 xts_crypt_slowpath(struct skcipher_request *req, xts_crypt_func crypt_func)
421 {
422 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
423 	const struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
424 	int tail = req->cryptlen % AES_BLOCK_SIZE;
425 	struct scatterlist sg_src[2], sg_dst[2];
426 	struct skcipher_request subreq;
427 	struct skcipher_walk walk;
428 	struct scatterlist *src, *dst;
429 	int err;
430 
431 	/*
432 	 * If the message length isn't divisible by the AES block size, then
433 	 * separate off the last full block and the partial block.  This ensures
434 	 * that they are processed in the same call to the assembly function,
435 	 * which is required for ciphertext stealing.
436 	 */
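	/*
	 * For example, a 100-byte message has tail = 4, so the subrequest
	 * below covers the first 100 - 4 - 16 = 80 bytes, and the remaining
	 * 16 + 4 = 20 bytes are left for the ciphertext-stealing call at the
	 * end of this function.
	 */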
437 	if (tail) {
438 		skcipher_request_set_tfm(&subreq, tfm);
439 		skcipher_request_set_callback(&subreq,
440 					      skcipher_request_flags(req),
441 					      NULL, NULL);
442 		skcipher_request_set_crypt(&subreq, req->src, req->dst,
443 					   req->cryptlen - tail - AES_BLOCK_SIZE,
444 					   req->iv);
445 		req = &subreq;
446 	}
447 
448 	err = skcipher_walk_virt(&walk, req, false);
449 
450 	while (walk.nbytes) {
451 		kernel_fpu_begin();
452 		(*crypt_func)(&ctx->crypt_ctx,
453 			      walk.src.virt.addr, walk.dst.virt.addr,
454 			      walk.nbytes & ~(AES_BLOCK_SIZE - 1), req->iv);
455 		kernel_fpu_end();
456 		err = skcipher_walk_done(&walk,
457 					 walk.nbytes & (AES_BLOCK_SIZE - 1));
458 	}
459 
460 	if (err || !tail)
461 		return err;
462 
463 	/* Do ciphertext stealing with the last full block and partial block. */
464 
465 	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
466 	if (req->dst != req->src)
467 		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
468 
469 	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
470 				   req->iv);
471 
472 	err = skcipher_walk_virt(&walk, req, false);
473 	if (err)
474 		return err;
475 
476 	kernel_fpu_begin();
477 	(*crypt_func)(&ctx->crypt_ctx, walk.src.virt.addr, walk.dst.virt.addr,
478 		      walk.nbytes, req->iv);
479 	kernel_fpu_end();
480 
481 	return skcipher_walk_done(&walk, 0);
482 }
483 
484 /* __always_inline to avoid indirect call in fastpath */
485 static __always_inline int
486 xts_crypt(struct skcipher_request *req, xts_encrypt_iv_func encrypt_iv,
487 	  xts_crypt_func crypt_func)
488 {
489 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
490 	const struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
491 
492 	if (unlikely(req->cryptlen < AES_BLOCK_SIZE))
493 		return -EINVAL;
494 
495 	kernel_fpu_begin();
496 	(*encrypt_iv)(&ctx->tweak_ctx, req->iv);
497 
498 	/*
499 	 * In practice, virtually all XTS plaintexts and ciphertexts are either
500 	 * 512 or 4096 bytes and do not use multiple scatterlist elements.  To
501 	 * optimize the performance of these cases, the below fast-path handles
502 	 * single-scatterlist-element messages as efficiently as possible.  The
503 	 * code is 64-bit specific, as it assumes no page mapping is needed.
504 	 */
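	/*
	 * The length checks below are what establish that: if the first src
	 * and dst scatterlist elements each cover at least cryptlen bytes,
	 * the whole message lies in a single element on each side, so
	 * sg_virt() yields a linear buffer and the entire request can be
	 * processed by one call within one FPU section.
	 */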
505 	if (IS_ENABLED(CONFIG_X86_64) &&
506 	    likely(req->src->length >= req->cryptlen &&
507 		   req->dst->length >= req->cryptlen)) {
508 		(*crypt_func)(&ctx->crypt_ctx, sg_virt(req->src),
509 			      sg_virt(req->dst), req->cryptlen, req->iv);
510 		kernel_fpu_end();
511 		return 0;
512 	}
513 	kernel_fpu_end();
514 	return xts_crypt_slowpath(req, crypt_func);
515 }
516 
517 static void aesni_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
518 				 u8 iv[AES_BLOCK_SIZE])
519 {
520 	aesni_enc(tweak_key, iv, iv);
521 }
522 
523 static void aesni_xts_encrypt(const struct crypto_aes_ctx *key,
524 			      const u8 *src, u8 *dst, int len,
525 			      u8 tweak[AES_BLOCK_SIZE])
526 {
527 	aesni_xts_enc(key, dst, src, len, tweak);
528 }
529 
530 static void aesni_xts_decrypt(const struct crypto_aes_ctx *key,
531 			      const u8 *src, u8 *dst, int len,
532 			      u8 tweak[AES_BLOCK_SIZE])
533 {
534 	aesni_xts_dec(key, dst, src, len, tweak);
535 }
536 
537 static int xts_encrypt_aesni(struct skcipher_request *req)
538 {
539 	return xts_crypt(req, aesni_xts_encrypt_iv, aesni_xts_encrypt);
540 }
541 
542 static int xts_decrypt_aesni(struct skcipher_request *req)
543 {
544 	return xts_crypt(req, aesni_xts_encrypt_iv, aesni_xts_decrypt);
545 }
546 
547 static struct crypto_alg aesni_cipher_alg = {
548 	.cra_name		= "aes",
549 	.cra_driver_name	= "aes-aesni",
550 	.cra_priority		= 300,
551 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
552 	.cra_blocksize		= AES_BLOCK_SIZE,
553 	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
554 	.cra_module		= THIS_MODULE,
555 	.cra_u	= {
556 		.cipher	= {
557 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
558 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
559 			.cia_setkey		= aes_set_key,
560 			.cia_encrypt		= aesni_encrypt,
561 			.cia_decrypt		= aesni_decrypt
562 		}
563 	}
564 };
565 
566 static struct skcipher_alg aesni_skciphers[] = {
567 	{
568 		.base = {
569 			.cra_name		= "ecb(aes)",
570 			.cra_driver_name	= "ecb-aes-aesni",
571 			.cra_priority		= 400,
572 			.cra_blocksize		= AES_BLOCK_SIZE,
573 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
574 			.cra_module		= THIS_MODULE,
575 		},
576 		.min_keysize	= AES_MIN_KEY_SIZE,
577 		.max_keysize	= AES_MAX_KEY_SIZE,
578 		.setkey		= aesni_skcipher_setkey,
579 		.encrypt	= ecb_encrypt,
580 		.decrypt	= ecb_decrypt,
581 	}, {
582 		.base = {
583 			.cra_name		= "cbc(aes)",
584 			.cra_driver_name	= "cbc-aes-aesni",
585 			.cra_priority		= 400,
586 			.cra_blocksize		= AES_BLOCK_SIZE,
587 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
588 			.cra_module		= THIS_MODULE,
589 		},
590 		.min_keysize	= AES_MIN_KEY_SIZE,
591 		.max_keysize	= AES_MAX_KEY_SIZE,
592 		.ivsize		= AES_BLOCK_SIZE,
593 		.setkey		= aesni_skcipher_setkey,
594 		.encrypt	= cbc_encrypt,
595 		.decrypt	= cbc_decrypt,
596 	}, {
597 		.base = {
598 			.cra_name		= "cts(cbc(aes))",
599 			.cra_driver_name	= "cts-cbc-aes-aesni",
600 			.cra_priority		= 400,
601 			.cra_blocksize		= AES_BLOCK_SIZE,
602 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
603 			.cra_module		= THIS_MODULE,
604 		},
605 		.min_keysize	= AES_MIN_KEY_SIZE,
606 		.max_keysize	= AES_MAX_KEY_SIZE,
607 		.ivsize		= AES_BLOCK_SIZE,
608 		.walksize	= 2 * AES_BLOCK_SIZE,
609 		.setkey		= aesni_skcipher_setkey,
610 		.encrypt	= cts_cbc_encrypt,
611 		.decrypt	= cts_cbc_decrypt,
612 #ifdef CONFIG_X86_64
613 	}, {
614 		.base = {
615 			.cra_name		= "ctr(aes)",
616 			.cra_driver_name	= "ctr-aes-aesni",
617 			.cra_priority		= 400,
618 			.cra_blocksize		= 1,
619 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
620 			.cra_module		= THIS_MODULE,
621 		},
622 		.min_keysize	= AES_MIN_KEY_SIZE,
623 		.max_keysize	= AES_MAX_KEY_SIZE,
624 		.ivsize		= AES_BLOCK_SIZE,
625 		.chunksize	= AES_BLOCK_SIZE,
626 		.setkey		= aesni_skcipher_setkey,
627 		.encrypt	= ctr_crypt_aesni,
628 		.decrypt	= ctr_crypt_aesni,
629 #endif
630 	}, {
631 		.base = {
632 			.cra_name		= "xts(aes)",
633 			.cra_driver_name	= "xts-aes-aesni",
634 			.cra_priority		= 401,
635 			.cra_blocksize		= AES_BLOCK_SIZE,
636 			.cra_ctxsize		= XTS_AES_CTX_SIZE,
637 			.cra_module		= THIS_MODULE,
638 		},
639 		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
640 		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
641 		.ivsize		= AES_BLOCK_SIZE,
642 		.walksize	= 2 * AES_BLOCK_SIZE,
643 		.setkey		= xts_setkey_aesni,
644 		.encrypt	= xts_encrypt_aesni,
645 		.decrypt	= xts_decrypt_aesni,
646 	}
647 };
648 
649 #ifdef CONFIG_X86_64
650 asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
651 				   u8 iv[AES_BLOCK_SIZE]);
652 
653 /* __always_inline to avoid indirect call */
654 static __always_inline int
655 ctr_crypt(struct skcipher_request *req,
656 	  void (*ctr64_func)(const struct crypto_aes_ctx *key,
657 			     const u8 *src, u8 *dst, int len,
658 			     const u64 le_ctr[2]))
659 {
660 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
661 	const struct crypto_aes_ctx *key = aes_ctx(crypto_skcipher_ctx(tfm));
662 	unsigned int nbytes, p1_nbytes, nblocks;
663 	struct skcipher_walk walk;
664 	u64 le_ctr[2];
665 	u64 ctr64;
666 	int err;
667 
668 	ctr64 = le_ctr[0] = get_unaligned_be64(&req->iv[8]);
669 	le_ctr[1] = get_unaligned_be64(&req->iv[0]);
670 
671 	err = skcipher_walk_virt(&walk, req, false);
672 
673 	while ((nbytes = walk.nbytes) != 0) {
674 		if (nbytes < walk.total) {
675 			/* Not the end yet, so keep the length block-aligned. */
676 			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
677 			nblocks = nbytes / AES_BLOCK_SIZE;
678 		} else {
679 			/* It's the end, so include any final partial block. */
680 			nblocks = DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE);
681 		}
682 		ctr64 += nblocks;
683 
684 		kernel_fpu_begin();
685 		if (likely(ctr64 >= nblocks)) {
686 			/* The low 64 bits of the counter won't overflow. */
687 			(*ctr64_func)(key, walk.src.virt.addr,
688 				      walk.dst.virt.addr, nbytes, le_ctr);
689 		} else {
690 			/*
691 			 * The low 64 bits of the counter will overflow.  The
692 			 * assembly doesn't handle this case, so split the
693 			 * operation into two at the point where the overflow
694 			 * will occur.  After the first part, add the carry bit.
695 			 */
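			/*
			 * For example, if the low half was 2^64 - 2 and
			 * nblocks is 5, ctr64 wraps around to 3.  The first
			 * call then covers nblocks - ctr64 = 2 blocks, the
			 * carry is propagated into le_ctr[1], and the second
			 * call covers the remaining 3 blocks starting from a
			 * low half of 0.
			 */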
696 			p1_nbytes = min_t(unsigned int, nbytes,
697 					  (nblocks - ctr64) * AES_BLOCK_SIZE);
698 			(*ctr64_func)(key, walk.src.virt.addr,
699 				      walk.dst.virt.addr, p1_nbytes, le_ctr);
700 			le_ctr[0] = 0;
701 			le_ctr[1]++;
702 			(*ctr64_func)(key, walk.src.virt.addr + p1_nbytes,
703 				      walk.dst.virt.addr + p1_nbytes,
704 				      nbytes - p1_nbytes, le_ctr);
705 		}
706 		kernel_fpu_end();
707 		le_ctr[0] = ctr64;
708 
709 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
710 	}
711 
712 	put_unaligned_be64(ctr64, &req->iv[8]);
713 	put_unaligned_be64(le_ctr[1], &req->iv[0]);
714 
715 	return err;
716 }
717 
718 /* __always_inline to avoid indirect call */
719 static __always_inline int
720 xctr_crypt(struct skcipher_request *req,
721 	   void (*xctr_func)(const struct crypto_aes_ctx *key,
722 			     const u8 *src, u8 *dst, int len,
723 			     const u8 iv[AES_BLOCK_SIZE], u64 ctr))
724 {
725 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
726 	const struct crypto_aes_ctx *key = aes_ctx(crypto_skcipher_ctx(tfm));
727 	struct skcipher_walk walk;
728 	unsigned int nbytes;
729 	u64 ctr = 1;
730 	int err;
731 
732 	err = skcipher_walk_virt(&walk, req, false);
733 	while ((nbytes = walk.nbytes) != 0) {
734 		if (nbytes < walk.total)
735 			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
736 
737 		kernel_fpu_begin();
738 		(*xctr_func)(key, walk.src.virt.addr, walk.dst.virt.addr,
739 			     nbytes, req->iv, ctr);
740 		kernel_fpu_end();
741 
742 		ctr += DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE);
743 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
744 	}
745 	return err;
746 }
747 
748 #define DEFINE_AVX_SKCIPHER_ALGS(suffix, driver_name_suffix, priority)	       \
749 									       \
750 asmlinkage void								       \
751 aes_xts_encrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src,      \
752 			 u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]);	       \
753 asmlinkage void								       \
754 aes_xts_decrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src,      \
755 			 u8 *dst, int len, u8 tweak[AES_BLOCK_SIZE]);	       \
756 									       \
757 static int xts_encrypt_##suffix(struct skcipher_request *req)		       \
758 {									       \
759 	return xts_crypt(req, aes_xts_encrypt_iv, aes_xts_encrypt_##suffix);   \
760 }									       \
761 									       \
762 static int xts_decrypt_##suffix(struct skcipher_request *req)		       \
763 {									       \
764 	return xts_crypt(req, aes_xts_encrypt_iv, aes_xts_decrypt_##suffix);   \
765 }									       \
766 									       \
767 asmlinkage void								       \
768 aes_ctr64_crypt_##suffix(const struct crypto_aes_ctx *key,		       \
769 			 const u8 *src, u8 *dst, int len, const u64 le_ctr[2]);\
770 									       \
771 static int ctr_crypt_##suffix(struct skcipher_request *req)		       \
772 {									       \
773 	return ctr_crypt(req, aes_ctr64_crypt_##suffix);		       \
774 }									       \
775 									       \
776 asmlinkage void								       \
777 aes_xctr_crypt_##suffix(const struct crypto_aes_ctx *key,		       \
778 			const u8 *src, u8 *dst, int len,		       \
779 			const u8 iv[AES_BLOCK_SIZE], u64 ctr);		       \
780 									       \
781 static int xctr_crypt_##suffix(struct skcipher_request *req)		       \
782 {									       \
783 	return xctr_crypt(req, aes_xctr_crypt_##suffix);		       \
784 }									       \
785 									       \
786 static struct skcipher_alg skcipher_algs_##suffix[] = {{		       \
787 	.base.cra_name		= "xts(aes)",				       \
788 	.base.cra_driver_name	= "xts-aes-" driver_name_suffix,	       \
789 	.base.cra_priority	= priority,				       \
790 	.base.cra_blocksize	= AES_BLOCK_SIZE,			       \
791 	.base.cra_ctxsize	= XTS_AES_CTX_SIZE,			       \
792 	.base.cra_module	= THIS_MODULE,				       \
793 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,			       \
794 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,			       \
795 	.ivsize			= AES_BLOCK_SIZE,			       \
796 	.walksize		= 2 * AES_BLOCK_SIZE,			       \
797 	.setkey			= xts_setkey_aesni,			       \
798 	.encrypt		= xts_encrypt_##suffix,			       \
799 	.decrypt		= xts_decrypt_##suffix,			       \
800 }, {									       \
801 	.base.cra_name		= "ctr(aes)",				       \
802 	.base.cra_driver_name	= "ctr-aes-" driver_name_suffix,	       \
803 	.base.cra_priority	= priority,				       \
804 	.base.cra_blocksize	= 1,					       \
805 	.base.cra_ctxsize	= CRYPTO_AES_CTX_SIZE,			       \
806 	.base.cra_module	= THIS_MODULE,				       \
807 	.min_keysize		= AES_MIN_KEY_SIZE,			       \
808 	.max_keysize		= AES_MAX_KEY_SIZE,			       \
809 	.ivsize			= AES_BLOCK_SIZE,			       \
810 	.chunksize		= AES_BLOCK_SIZE,			       \
811 	.setkey			= aesni_skcipher_setkey,		       \
812 	.encrypt		= ctr_crypt_##suffix,			       \
813 	.decrypt		= ctr_crypt_##suffix,			       \
814 }, {									       \
815 	.base.cra_name		= "xctr(aes)",				       \
816 	.base.cra_driver_name	= "xctr-aes-" driver_name_suffix,	       \
817 	.base.cra_priority	= priority,				       \
818 	.base.cra_blocksize	= 1,					       \
819 	.base.cra_ctxsize	= CRYPTO_AES_CTX_SIZE,			       \
820 	.base.cra_module	= THIS_MODULE,				       \
821 	.min_keysize		= AES_MIN_KEY_SIZE,			       \
822 	.max_keysize		= AES_MAX_KEY_SIZE,			       \
823 	.ivsize			= AES_BLOCK_SIZE,			       \
824 	.chunksize		= AES_BLOCK_SIZE,			       \
825 	.setkey			= aesni_skcipher_setkey,		       \
826 	.encrypt		= xctr_crypt_##suffix,			       \
827 	.decrypt		= xctr_crypt_##suffix,			       \
828 }}
829 
830 DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500);
831 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600);
832 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800);
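
/*
 * For example, DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600) above
 * declares the assembly entry points aes_xts_encrypt_vaes_avx2(),
 * aes_xts_decrypt_vaes_avx2(), aes_ctr64_crypt_vaes_avx2(), and
 * aes_xctr_crypt_vaes_avx2(), defines thin C wrappers around them, and
 * produces the array skcipher_algs_vaes_avx2[] whose driver names are
 * "xts-aes-vaes-avx2", "ctr-aes-vaes-avx2", and "xctr-aes-vaes-avx2",
 * all at priority 600.
 */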
833 
834 /* The common part of the x86_64 AES-GCM key struct */
835 struct aes_gcm_key {
836 	/* Expanded AES key and the AES key length in bytes */
837 	struct crypto_aes_ctx aes_key;
838 
839 	/* RFC4106 nonce (used only by the rfc4106 algorithms) */
840 	u32 rfc4106_nonce;
841 };
842 
843 /* Key struct used by the AES-NI implementations of AES-GCM */
844 struct aes_gcm_key_aesni {
845 	/*
846 	 * Common part of the key.  The assembly code requires 16-byte alignment
847 	 * for the round keys; we get this by them being located at the start of
848 	 * the struct and the whole struct being 16-byte aligned.
849 	 */
850 	struct aes_gcm_key base;
851 
852 	/*
853 	 * Powers of the hash key H^8 through H^1.  These are 128-bit values.
854 	 * They all have an extra factor of x^-1 and are byte-reversed.  16-byte
855 	 * alignment is required by the assembly code.
856 	 */
857 	u64 h_powers[8][2] __aligned(16);
858 
859 	/*
860 	 * h_powers_xored[i] contains the two 64-bit halves of h_powers[i] XOR'd
861 	 * together.  It's used for Karatsuba multiplication.  16-byte alignment
862 	 * is required by the assembly code.
863 	 */
864 	u64 h_powers_xored[8] __aligned(16);
865 
866 	/*
867 	 * H^1 times x^64 (and also the usual extra factor of x^-1).  16-byte
868 	 * alignment is required by the assembly code.
869 	 */
870 	u64 h_times_x64[2] __aligned(16);
871 };
872 #define AES_GCM_KEY_AESNI(key)	\
873 	container_of((key), struct aes_gcm_key_aesni, base)
874 #define AES_GCM_KEY_AESNI_SIZE	\
875 	(sizeof(struct aes_gcm_key_aesni) + (15 & ~(CRYPTO_MINALIGN - 1)))
876 
877 /* Key struct used by the VAES + AVX10 implementations of AES-GCM */
878 struct aes_gcm_key_avx10 {
879 	/*
880 	 * Common part of the key.  The assembly code prefers 16-byte alignment
881 	 * for the round keys; we get this by them being located at the start of
882 	 * the struct and the whole struct being 64-byte aligned.
883 	 */
884 	struct aes_gcm_key base;
885 
886 	/*
887 	 * Powers of the hash key H^16 through H^1.  These are 128-bit values.
888 	 * They all have an extra factor of x^-1 and are byte-reversed.  This
889 	 * array is aligned to a 64-byte boundary to make it naturally aligned
890 	 * for 512-bit loads, which can improve performance.  (The assembly code
891 	 * doesn't *need* the alignment; this is just an optimization.)
892 	 */
893 	u64 h_powers[16][2] __aligned(64);
894 
895 	/* Three padding blocks required by the assembly code */
896 	u64 padding[3][2];
897 };
898 #define AES_GCM_KEY_AVX10(key)	\
899 	container_of((key), struct aes_gcm_key_avx10, base)
900 #define AES_GCM_KEY_AVX10_SIZE	\
901 	(sizeof(struct aes_gcm_key_avx10) + (63 & ~(CRYPTO_MINALIGN - 1)))
902 
903 /*
904  * These flags are passed to the AES-GCM helper functions to specify the
905  * specific version of AES-GCM (RFC4106 or not), whether it's encryption or
906  * decryption, and which assembly functions should be called.  Assembly
907  * functions are selected using flags instead of function pointers to avoid
908  * indirect calls (which are very expensive on x86) regardless of inlining.
909  */
910 #define FLAG_RFC4106	BIT(0)
911 #define FLAG_ENC	BIT(1)
912 #define FLAG_AVX	BIT(2)
913 #define FLAG_AVX10_256	BIT(3)
914 #define FLAG_AVX10_512	BIT(4)
915 
916 static inline struct aes_gcm_key *
917 aes_gcm_key_get(struct crypto_aead *tfm, int flags)
918 {
919 	if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
920 		return PTR_ALIGN(crypto_aead_ctx(tfm), 64);
921 	else
922 		return PTR_ALIGN(crypto_aead_ctx(tfm), 16);
923 }
924 
925 asmlinkage void
926 aes_gcm_precompute_aesni(struct aes_gcm_key_aesni *key);
927 asmlinkage void
928 aes_gcm_precompute_aesni_avx(struct aes_gcm_key_aesni *key);
929 asmlinkage void
930 aes_gcm_precompute_vaes_avx10_256(struct aes_gcm_key_avx10 *key);
931 asmlinkage void
932 aes_gcm_precompute_vaes_avx10_512(struct aes_gcm_key_avx10 *key);
933 
934 static void aes_gcm_precompute(struct aes_gcm_key *key, int flags)
935 {
936 	/*
937 	 * To make things a bit easier on the assembly side, the AVX10
938 	 * implementations use the same key format.  Therefore, a single
939 	 * function using 256-bit vectors would suffice here.  However, it's
940 	 * straightforward to provide a 512-bit one because of how the assembly
941 	 * code is structured, and it works nicely because the total size of the
942 	 * key powers is a multiple of 512 bits.  So we take advantage of that.
943 	 *
944 	 * A similar situation applies to the AES-NI implementations.
945 	 */
946 	if (flags & FLAG_AVX10_512)
947 		aes_gcm_precompute_vaes_avx10_512(AES_GCM_KEY_AVX10(key));
948 	else if (flags & FLAG_AVX10_256)
949 		aes_gcm_precompute_vaes_avx10_256(AES_GCM_KEY_AVX10(key));
950 	else if (flags & FLAG_AVX)
951 		aes_gcm_precompute_aesni_avx(AES_GCM_KEY_AESNI(key));
952 	else
953 		aes_gcm_precompute_aesni(AES_GCM_KEY_AESNI(key));
954 }
955 
956 asmlinkage void
957 aes_gcm_aad_update_aesni(const struct aes_gcm_key_aesni *key,
958 			 u8 ghash_acc[16], const u8 *aad, int aadlen);
959 asmlinkage void
960 aes_gcm_aad_update_aesni_avx(const struct aes_gcm_key_aesni *key,
961 			     u8 ghash_acc[16], const u8 *aad, int aadlen);
962 asmlinkage void
963 aes_gcm_aad_update_vaes_avx10(const struct aes_gcm_key_avx10 *key,
964 			      u8 ghash_acc[16], const u8 *aad, int aadlen);
965 
966 static void aes_gcm_aad_update(const struct aes_gcm_key *key, u8 ghash_acc[16],
967 			       const u8 *aad, int aadlen, int flags)
968 {
969 	if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
970 		aes_gcm_aad_update_vaes_avx10(AES_GCM_KEY_AVX10(key), ghash_acc,
971 					      aad, aadlen);
972 	else if (flags & FLAG_AVX)
973 		aes_gcm_aad_update_aesni_avx(AES_GCM_KEY_AESNI(key), ghash_acc,
974 					     aad, aadlen);
975 	else
976 		aes_gcm_aad_update_aesni(AES_GCM_KEY_AESNI(key), ghash_acc,
977 					 aad, aadlen);
978 }
979 
980 asmlinkage void
981 aes_gcm_enc_update_aesni(const struct aes_gcm_key_aesni *key,
982 			 const u32 le_ctr[4], u8 ghash_acc[16],
983 			 const u8 *src, u8 *dst, int datalen);
984 asmlinkage void
985 aes_gcm_enc_update_aesni_avx(const struct aes_gcm_key_aesni *key,
986 			     const u32 le_ctr[4], u8 ghash_acc[16],
987 			     const u8 *src, u8 *dst, int datalen);
988 asmlinkage void
989 aes_gcm_enc_update_vaes_avx10_256(const struct aes_gcm_key_avx10 *key,
990 				  const u32 le_ctr[4], u8 ghash_acc[16],
991 				  const u8 *src, u8 *dst, int datalen);
992 asmlinkage void
993 aes_gcm_enc_update_vaes_avx10_512(const struct aes_gcm_key_avx10 *key,
994 				  const u32 le_ctr[4], u8 ghash_acc[16],
995 				  const u8 *src, u8 *dst, int datalen);
996 
997 asmlinkage void
998 aes_gcm_dec_update_aesni(const struct aes_gcm_key_aesni *key,
999 			 const u32 le_ctr[4], u8 ghash_acc[16],
1000 			 const u8 *src, u8 *dst, int datalen);
1001 asmlinkage void
1002 aes_gcm_dec_update_aesni_avx(const struct aes_gcm_key_aesni *key,
1003 			     const u32 le_ctr[4], u8 ghash_acc[16],
1004 			     const u8 *src, u8 *dst, int datalen);
1005 asmlinkage void
1006 aes_gcm_dec_update_vaes_avx10_256(const struct aes_gcm_key_avx10 *key,
1007 				  const u32 le_ctr[4], u8 ghash_acc[16],
1008 				  const u8 *src, u8 *dst, int datalen);
1009 asmlinkage void
1010 aes_gcm_dec_update_vaes_avx10_512(const struct aes_gcm_key_avx10 *key,
1011 				  const u32 le_ctr[4], u8 ghash_acc[16],
1012 				  const u8 *src, u8 *dst, int datalen);
1013 
1014 /* __always_inline to optimize out the branches based on @flags */
1015 static __always_inline void
1016 aes_gcm_update(const struct aes_gcm_key *key,
1017 	       const u32 le_ctr[4], u8 ghash_acc[16],
1018 	       const u8 *src, u8 *dst, int datalen, int flags)
1019 {
1020 	if (flags & FLAG_ENC) {
1021 		if (flags & FLAG_AVX10_512)
1022 			aes_gcm_enc_update_vaes_avx10_512(AES_GCM_KEY_AVX10(key),
1023 							  le_ctr, ghash_acc,
1024 							  src, dst, datalen);
1025 		else if (flags & FLAG_AVX10_256)
1026 			aes_gcm_enc_update_vaes_avx10_256(AES_GCM_KEY_AVX10(key),
1027 							  le_ctr, ghash_acc,
1028 							  src, dst, datalen);
1029 		else if (flags & FLAG_AVX)
1030 			aes_gcm_enc_update_aesni_avx(AES_GCM_KEY_AESNI(key),
1031 						     le_ctr, ghash_acc,
1032 						     src, dst, datalen);
1033 		else
1034 			aes_gcm_enc_update_aesni(AES_GCM_KEY_AESNI(key), le_ctr,
1035 						 ghash_acc, src, dst, datalen);
1036 	} else {
1037 		if (flags & FLAG_AVX10_512)
1038 			aes_gcm_dec_update_vaes_avx10_512(AES_GCM_KEY_AVX10(key),
1039 							  le_ctr, ghash_acc,
1040 							  src, dst, datalen);
1041 		else if (flags & FLAG_AVX10_256)
1042 			aes_gcm_dec_update_vaes_avx10_256(AES_GCM_KEY_AVX10(key),
1043 							  le_ctr, ghash_acc,
1044 							  src, dst, datalen);
1045 		else if (flags & FLAG_AVX)
1046 			aes_gcm_dec_update_aesni_avx(AES_GCM_KEY_AESNI(key),
1047 						     le_ctr, ghash_acc,
1048 						     src, dst, datalen);
1049 		else
1050 			aes_gcm_dec_update_aesni(AES_GCM_KEY_AESNI(key),
1051 						 le_ctr, ghash_acc,
1052 						 src, dst, datalen);
1053 	}
1054 }
1055 
1056 asmlinkage void
1057 aes_gcm_enc_final_aesni(const struct aes_gcm_key_aesni *key,
1058 			const u32 le_ctr[4], u8 ghash_acc[16],
1059 			u64 total_aadlen, u64 total_datalen);
1060 asmlinkage void
1061 aes_gcm_enc_final_aesni_avx(const struct aes_gcm_key_aesni *key,
1062 			    const u32 le_ctr[4], u8 ghash_acc[16],
1063 			    u64 total_aadlen, u64 total_datalen);
1064 asmlinkage void
1065 aes_gcm_enc_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
1066 			     const u32 le_ctr[4], u8 ghash_acc[16],
1067 			     u64 total_aadlen, u64 total_datalen);
1068 
1069 /* __always_inline to optimize out the branches based on @flags */
1070 static __always_inline void
1071 aes_gcm_enc_final(const struct aes_gcm_key *key,
1072 		  const u32 le_ctr[4], u8 ghash_acc[16],
1073 		  u64 total_aadlen, u64 total_datalen, int flags)
1074 {
1075 	if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
1076 		aes_gcm_enc_final_vaes_avx10(AES_GCM_KEY_AVX10(key),
1077 					     le_ctr, ghash_acc,
1078 					     total_aadlen, total_datalen);
1079 	else if (flags & FLAG_AVX)
1080 		aes_gcm_enc_final_aesni_avx(AES_GCM_KEY_AESNI(key),
1081 					    le_ctr, ghash_acc,
1082 					    total_aadlen, total_datalen);
1083 	else
1084 		aes_gcm_enc_final_aesni(AES_GCM_KEY_AESNI(key),
1085 					le_ctr, ghash_acc,
1086 					total_aadlen, total_datalen);
1087 }
1088 
1089 asmlinkage bool __must_check
1090 aes_gcm_dec_final_aesni(const struct aes_gcm_key_aesni *key,
1091 			const u32 le_ctr[4], const u8 ghash_acc[16],
1092 			u64 total_aadlen, u64 total_datalen,
1093 			const u8 tag[16], int taglen);
1094 asmlinkage bool __must_check
1095 aes_gcm_dec_final_aesni_avx(const struct aes_gcm_key_aesni *key,
1096 			    const u32 le_ctr[4], const u8 ghash_acc[16],
1097 			    u64 total_aadlen, u64 total_datalen,
1098 			    const u8 tag[16], int taglen);
1099 asmlinkage bool __must_check
1100 aes_gcm_dec_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
1101 			     const u32 le_ctr[4], const u8 ghash_acc[16],
1102 			     u64 total_aadlen, u64 total_datalen,
1103 			     const u8 tag[16], int taglen);
1104 
1105 /* __always_inline to optimize out the branches based on @flags */
1106 static __always_inline bool __must_check
1107 aes_gcm_dec_final(const struct aes_gcm_key *key, const u32 le_ctr[4],
1108 		  u8 ghash_acc[16], u64 total_aadlen, u64 total_datalen,
1109 		  u8 tag[16], int taglen, int flags)
1110 {
1111 	if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512))
1112 		return aes_gcm_dec_final_vaes_avx10(AES_GCM_KEY_AVX10(key),
1113 						    le_ctr, ghash_acc,
1114 						    total_aadlen, total_datalen,
1115 						    tag, taglen);
1116 	else if (flags & FLAG_AVX)
1117 		return aes_gcm_dec_final_aesni_avx(AES_GCM_KEY_AESNI(key),
1118 						   le_ctr, ghash_acc,
1119 						   total_aadlen, total_datalen,
1120 						   tag, taglen);
1121 	else
1122 		return aes_gcm_dec_final_aesni(AES_GCM_KEY_AESNI(key),
1123 					       le_ctr, ghash_acc,
1124 					       total_aadlen, total_datalen,
1125 					       tag, taglen);
1126 }
1127 
1128 /*
1129  * This is the Integrity Check Value (aka the authentication tag) length and can
1130  * be 8, 12 or 16 bytes long.
1131  */
1132 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
1133 				       unsigned int authsize)
1134 {
1135 	switch (authsize) {
1136 	case 8:
1137 	case 12:
1138 	case 16:
1139 		break;
1140 	default:
1141 		return -EINVAL;
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
1148 				       unsigned int authsize)
1149 {
1150 	switch (authsize) {
1151 	case 4:
1152 	case 8:
1153 	case 12:
1154 	case 13:
1155 	case 14:
1156 	case 15:
1157 	case 16:
1158 		break;
1159 	default:
1160 		return -EINVAL;
1161 	}
1162 
1163 	return 0;
1164 }
1165 
1166 /*
1167  * This is the setkey function for the x86_64 implementations of AES-GCM.  It
1168  * saves the RFC4106 nonce if applicable, expands the AES key, and precomputes
1169  * powers of the hash key.
1170  *
1171  * To comply with the crypto_aead API, this has to be usable in no-SIMD context.
1172  * For that reason, this function includes a portable C implementation of the
1173  * needed logic.  However, the portable C implementation is very slow, taking
1174  * about the same time as encrypting 37 KB of data.  To be ready for users that
1175  * may set a key even somewhat frequently, we therefore also include a SIMD
1176  * assembly implementation, expanding the AES key using AES-NI and precomputing
1177  * the hash key powers using PCLMULQDQ or VPCLMULQDQ.
1178  */
1179 static int gcm_setkey(struct crypto_aead *tfm, const u8 *raw_key,
1180 		      unsigned int keylen, int flags)
1181 {
1182 	struct aes_gcm_key *key = aes_gcm_key_get(tfm, flags);
1183 	int err;
1184 
1185 	if (flags & FLAG_RFC4106) {
1186 		if (keylen < 4)
1187 			return -EINVAL;
1188 		keylen -= 4;
1189 		key->rfc4106_nonce = get_unaligned_be32(raw_key + keylen);
1190 	}
1191 
1192 	/* The assembly code assumes the following offsets. */
1193 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, base.aes_key.key_enc) != 0);
1194 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, base.aes_key.key_length) != 480);
1195 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_powers) != 496);
1196 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_powers_xored) != 624);
1197 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_aesni, h_times_x64) != 688);
1198 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, base.aes_key.key_enc) != 0);
1199 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, base.aes_key.key_length) != 480);
1200 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, h_powers) != 512);
1201 	BUILD_BUG_ON(offsetof(struct aes_gcm_key_avx10, padding) != 768);
1202 
1203 	if (likely(crypto_simd_usable())) {
1204 		err = aes_check_keylen(keylen);
1205 		if (err)
1206 			return err;
1207 		kernel_fpu_begin();
1208 		aesni_set_key(&key->aes_key, raw_key, keylen);
1209 		aes_gcm_precompute(key, flags);
1210 		kernel_fpu_end();
1211 	} else {
1212 		static const u8 x_to_the_minus1[16] __aligned(__alignof__(be128)) = {
1213 			[0] = 0xc2, [15] = 1
1214 		};
1215 		static const u8 x_to_the_63[16] __aligned(__alignof__(be128)) = {
1216 			[7] = 1,
1217 		};
1218 		be128 h1 = {};
1219 		be128 h;
1220 		int i;
1221 
1222 		err = aes_expandkey(&key->aes_key, raw_key, keylen);
1223 		if (err)
1224 			return err;
1225 
1226 		/* Encrypt the all-zeroes block to get the hash key H^1 */
1227 		aes_encrypt(&key->aes_key, (u8 *)&h1, (u8 *)&h1);
1228 
1229 		/* Compute H^1 * x^-1 */
1230 		h = h1;
1231 		gf128mul_lle(&h, (const be128 *)x_to_the_minus1);
1232 
1233 		/* Compute the needed key powers */
1234 		if (flags & (FLAG_AVX10_256 | FLAG_AVX10_512)) {
1235 			struct aes_gcm_key_avx10 *k = AES_GCM_KEY_AVX10(key);
1236 
1237 			for (i = ARRAY_SIZE(k->h_powers) - 1; i >= 0; i--) {
1238 				k->h_powers[i][0] = be64_to_cpu(h.b);
1239 				k->h_powers[i][1] = be64_to_cpu(h.a);
1240 				gf128mul_lle(&h, &h1);
1241 			}
1242 			memset(k->padding, 0, sizeof(k->padding));
1243 		} else {
1244 			struct aes_gcm_key_aesni *k = AES_GCM_KEY_AESNI(key);
1245 
1246 			for (i = ARRAY_SIZE(k->h_powers) - 1; i >= 0; i--) {
1247 				k->h_powers[i][0] = be64_to_cpu(h.b);
1248 				k->h_powers[i][1] = be64_to_cpu(h.a);
1249 				k->h_powers_xored[i] = k->h_powers[i][0] ^
1250 						       k->h_powers[i][1];
1251 				gf128mul_lle(&h, &h1);
1252 			}
1253 			gf128mul_lle(&h1, (const be128 *)x_to_the_63);
1254 			k->h_times_x64[0] = be64_to_cpu(h1.b);
1255 			k->h_times_x64[1] = be64_to_cpu(h1.a);
1256 		}
1257 	}
1258 	return 0;
1259 }
1260 
1261 /*
1262  * Initialize @ghash_acc, then pass all @assoclen bytes of associated data
1263  * (a.k.a. additional authenticated data) from @sg_src through the GHASH update
1264  * assembly function.  kernel_fpu_begin() must have already been called.
1265  */
1266 static void gcm_process_assoc(const struct aes_gcm_key *key, u8 ghash_acc[16],
1267 			      struct scatterlist *sg_src, unsigned int assoclen,
1268 			      int flags)
1269 {
1270 	struct scatter_walk walk;
1271 	/*
1272 	 * The assembly function requires that the length of any non-last
1273 	 * segment of associated data be a multiple of 16 bytes, so this
1274 	 * function does the buffering needed to achieve that.
1275 	 */
1276 	unsigned int pos = 0;
1277 	u8 buf[16];
1278 
1279 	memset(ghash_acc, 0, 16);
1280 	scatterwalk_start(&walk, sg_src);
1281 
1282 	while (assoclen) {
1283 		unsigned int orig_len_this_step = scatterwalk_next(
1284 			&walk, assoclen);
1285 		unsigned int len_this_step = orig_len_this_step;
1286 		unsigned int len;
1287 		const u8 *src = walk.addr;
1288 
1289 		if (unlikely(pos)) {
1290 			len = min(len_this_step, 16 - pos);
1291 			memcpy(&buf[pos], src, len);
1292 			pos += len;
1293 			src += len;
1294 			len_this_step -= len;
1295 			if (pos < 16)
1296 				goto next;
1297 			aes_gcm_aad_update(key, ghash_acc, buf, 16, flags);
1298 			pos = 0;
1299 		}
1300 		len = len_this_step;
1301 		if (unlikely(assoclen)) /* Not the last segment yet? */
1302 			len = round_down(len, 16);
1303 		aes_gcm_aad_update(key, ghash_acc, src, len, flags);
1304 		src += len;
1305 		len_this_step -= len;
1306 		if (unlikely(len_this_step)) {
1307 			memcpy(buf, src, len_this_step);
1308 			pos = len_this_step;
1309 		}
1310 next:
1311 		scatterwalk_done_src(&walk, orig_len_this_step);
1312 		if (need_resched()) {
1313 			kernel_fpu_end();
1314 			kernel_fpu_begin();
1315 		}
1316 		assoclen -= orig_len_this_step;
1317 	}
1318 	if (unlikely(pos))
1319 		aes_gcm_aad_update(key, ghash_acc, buf, pos, flags);
1320 }
1321 
1322 
1323 /* __always_inline to optimize out the branches based on @flags */
1324 static __always_inline int
1325 gcm_crypt(struct aead_request *req, int flags)
1326 {
1327 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1328 	const struct aes_gcm_key *key = aes_gcm_key_get(tfm, flags);
1329 	unsigned int assoclen = req->assoclen;
1330 	struct skcipher_walk walk;
1331 	unsigned int nbytes;
1332 	u8 ghash_acc[16]; /* GHASH accumulator */
1333 	u32 le_ctr[4]; /* Counter in little-endian format */
1334 	int taglen;
1335 	int err;
1336 
1337 	/* Initialize the counter and determine the associated data length. */
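	/*
	 * GCM reserves counter block 1 for encrypting the authentication tag,
	 * so en/decryption of the data itself starts at counter block 2.
	 */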
1338 	le_ctr[0] = 2;
1339 	if (flags & FLAG_RFC4106) {
1340 		if (unlikely(assoclen != 16 && assoclen != 20))
1341 			return -EINVAL;
1342 		assoclen -= 8;
1343 		le_ctr[1] = get_unaligned_be32(req->iv + 4);
1344 		le_ctr[2] = get_unaligned_be32(req->iv + 0);
1345 		le_ctr[3] = key->rfc4106_nonce; /* already byte-swapped */
1346 	} else {
1347 		le_ctr[1] = get_unaligned_be32(req->iv + 8);
1348 		le_ctr[2] = get_unaligned_be32(req->iv + 4);
1349 		le_ctr[3] = get_unaligned_be32(req->iv + 0);
1350 	}
1351 
1352 	/* Begin walking through the plaintext or ciphertext. */
1353 	if (flags & FLAG_ENC)
1354 		err = skcipher_walk_aead_encrypt(&walk, req, false);
1355 	else
1356 		err = skcipher_walk_aead_decrypt(&walk, req, false);
1357 	if (err)
1358 		return err;
1359 
1360 	/*
1361 	 * Since the AES-GCM assembly code requires that at least three assembly
1362 	 * functions be called to process any message (this is needed to support
1363 	 * incremental updates cleanly), to reduce overhead we try to do all
1364 	 * three calls in the same kernel FPU section if possible.  We close the
1365 	 * section and start a new one if there are multiple data segments or if
1366 	 * rescheduling is needed while processing the associated data.
1367 	 */
1368 	kernel_fpu_begin();
1369 
1370 	/* Pass the associated data through GHASH. */
1371 	gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
1372 
1373 	/* En/decrypt the data and pass the ciphertext through GHASH. */
1374 	while (unlikely((nbytes = walk.nbytes) < walk.total)) {
1375 		/*
1376 		 * Non-last segment.  In this case, the assembly function
1377 		 * requires that the length be a multiple of 16 (AES_BLOCK_SIZE)
1378 		 * bytes.  The needed buffering of up to 16 bytes is handled by
1379 		 * the skcipher_walk.  Here we just need to round down to a
1380 		 * multiple of 16.
1381 		 */
1382 		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
1383 		aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
1384 			       walk.dst.virt.addr, nbytes, flags);
1385 		le_ctr[0] += nbytes / AES_BLOCK_SIZE;
1386 		kernel_fpu_end();
1387 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
1388 		if (err)
1389 			return err;
1390 		kernel_fpu_begin();
1391 	}
1392 	/* Last segment: process all remaining data. */
1393 	aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
1394 		       walk.dst.virt.addr, nbytes, flags);
1395 	/*
1396 	 * The low word of the counter isn't used by the finalize, so there's no
1397 	 * need to increment it here.
1398 	 */
1399 
1400 	/* Finalize */
1401 	taglen = crypto_aead_authsize(tfm);
1402 	if (flags & FLAG_ENC) {
1403 		/* Finish computing the auth tag. */
1404 		aes_gcm_enc_final(key, le_ctr, ghash_acc, assoclen,
1405 				  req->cryptlen, flags);
1406 
1407 		/* Store the computed auth tag in the dst scatterlist. */
1408 		scatterwalk_map_and_copy(ghash_acc, req->dst, req->assoclen +
1409 					 req->cryptlen, taglen, 1);
1410 	} else {
1411 		unsigned int datalen = req->cryptlen - taglen;
1412 		u8 tag[16];
1413 
1414 		/* Get the transmitted auth tag from the src scatterlist. */
1415 		scatterwalk_map_and_copy(tag, req->src, req->assoclen + datalen,
1416 					 taglen, 0);
1417 		/*
1418 		 * Finish computing the auth tag and compare it to the
1419 		 * transmitted one.  The assembly function does the actual tag
1420 		 * comparison.  Here, just check the boolean result.
1421 		 */
1422 		if (!aes_gcm_dec_final(key, le_ctr, ghash_acc, assoclen,
1423 				       datalen, tag, taglen, flags))
1424 			err = -EBADMSG;
1425 	}
1426 	kernel_fpu_end();
1427 	if (nbytes)
1428 		skcipher_walk_done(&walk, 0);
1429 	return err;
1430 }
1431 
1432 #define DEFINE_GCM_ALGS(suffix, flags, generic_driver_name, rfc_driver_name,   \
1433 			ctxsize, priority)				       \
1434 									       \
1435 static int gcm_setkey_##suffix(struct crypto_aead *tfm, const u8 *raw_key,     \
1436 			       unsigned int keylen)			       \
1437 {									       \
1438 	return gcm_setkey(tfm, raw_key, keylen, (flags));		       \
1439 }									       \
1440 									       \
1441 static int gcm_encrypt_##suffix(struct aead_request *req)		       \
1442 {									       \
1443 	return gcm_crypt(req, (flags) | FLAG_ENC);			       \
1444 }									       \
1445 									       \
1446 static int gcm_decrypt_##suffix(struct aead_request *req)		       \
1447 {									       \
1448 	return gcm_crypt(req, (flags));					       \
1449 }									       \
1450 									       \
1451 static int rfc4106_setkey_##suffix(struct crypto_aead *tfm, const u8 *raw_key, \
1452 				   unsigned int keylen)			       \
1453 {									       \
1454 	return gcm_setkey(tfm, raw_key, keylen, (flags) | FLAG_RFC4106);       \
1455 }									       \
1456 									       \
1457 static int rfc4106_encrypt_##suffix(struct aead_request *req)		       \
1458 {									       \
1459 	return gcm_crypt(req, (flags) | FLAG_RFC4106 | FLAG_ENC);	       \
1460 }									       \
1461 									       \
1462 static int rfc4106_decrypt_##suffix(struct aead_request *req)		       \
1463 {									       \
1464 	return gcm_crypt(req, (flags) | FLAG_RFC4106);			       \
1465 }									       \
1466 									       \
1467 static struct aead_alg aes_gcm_algs_##suffix[] = { {			       \
1468 	.setkey			= gcm_setkey_##suffix,			       \
1469 	.setauthsize		= generic_gcmaes_set_authsize,		       \
1470 	.encrypt		= gcm_encrypt_##suffix,			       \
1471 	.decrypt		= gcm_decrypt_##suffix,			       \
1472 	.ivsize			= GCM_AES_IV_SIZE,			       \
1473 	.chunksize		= AES_BLOCK_SIZE,			       \
1474 	.maxauthsize		= 16,					       \
1475 	.base = {							       \
1476 		.cra_name		= "gcm(aes)",			       \
1477 		.cra_driver_name	= generic_driver_name,		       \
1478 		.cra_priority		= (priority),			       \
1479 		.cra_blocksize		= 1,				       \
1480 		.cra_ctxsize		= (ctxsize),			       \
1481 		.cra_module		= THIS_MODULE,			       \
1482 	},								       \
1483 }, {									       \
1484 	.setkey			= rfc4106_setkey_##suffix,		       \
1485 	.setauthsize		= common_rfc4106_set_authsize,		       \
1486 	.encrypt		= rfc4106_encrypt_##suffix,		       \
1487 	.decrypt		= rfc4106_decrypt_##suffix,		       \
1488 	.ivsize			= GCM_RFC4106_IV_SIZE,			       \
1489 	.chunksize		= AES_BLOCK_SIZE,			       \
1490 	.maxauthsize		= 16,					       \
1491 	.base = {							       \
1492 		.cra_name		= "rfc4106(gcm(aes))",		       \
1493 		.cra_driver_name	= rfc_driver_name,		       \
1494 		.cra_priority		= (priority),			       \
1495 		.cra_blocksize		= 1,				       \
1496 		.cra_ctxsize		= (ctxsize),			       \
1497 		.cra_module		= THIS_MODULE,			       \
1498 	},								       \
1499 } }
1500 
1501 /* aes_gcm_algs_aesni */
1502 DEFINE_GCM_ALGS(aesni, /* no flags */ 0,
1503 		"generic-gcm-aesni", "rfc4106-gcm-aesni",
1504 		AES_GCM_KEY_AESNI_SIZE, 400);
1505 
1506 /* aes_gcm_algs_aesni_avx */
1507 DEFINE_GCM_ALGS(aesni_avx, FLAG_AVX,
1508 		"generic-gcm-aesni-avx", "rfc4106-gcm-aesni-avx",
1509 		AES_GCM_KEY_AESNI_SIZE, 500);
1510 
1511 /* aes_gcm_algs_vaes_avx10_256 */
1512 DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256,
1513 		"generic-gcm-vaes-avx10_256", "rfc4106-gcm-vaes-avx10_256",
1514 		AES_GCM_KEY_AVX10_SIZE, 700);
1515 
1516 /* aes_gcm_algs_vaes_avx10_512 */
1517 DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512,
1518 		"generic-gcm-vaes-avx10_512", "rfc4106-gcm-vaes-avx10_512",
1519 		AES_GCM_KEY_AVX10_SIZE, 800);
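
/*
 * For example, the DEFINE_GCM_ALGS(aesni_avx, FLAG_AVX, ...) invocation above
 * defines gcm_setkey_aesni_avx(), gcm_encrypt_aesni_avx(),
 * gcm_decrypt_aesni_avx(), the rfc4106_*_aesni_avx() variants, and the
 * two-entry array aes_gcm_algs_aesni_avx[] covering "gcm(aes)" and
 * "rfc4106(gcm(aes))" at priority 500.
 */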
1520 
1521 static int __init register_avx_algs(void)
1522 {
1523 	int err;
1524 
1525 	if (!boot_cpu_has(X86_FEATURE_AVX))
1526 		return 0;
1527 	err = crypto_register_skciphers(skcipher_algs_aesni_avx,
1528 					ARRAY_SIZE(skcipher_algs_aesni_avx));
1529 	if (err)
1530 		return err;
1531 	err = crypto_register_aeads(aes_gcm_algs_aesni_avx,
1532 				    ARRAY_SIZE(aes_gcm_algs_aesni_avx));
1533 	if (err)
1534 		return err;
1535 	/*
1536 	 * Note: not all the algorithms registered below actually require
1537 	 * VPCLMULQDQ.  But in practice every CPU with VAES also has VPCLMULQDQ.
1538 	 * Similarly, the assembler support was added at about the same time.
1539 	 * For simplicity, just always check for VAES and VPCLMULQDQ together.
1540 	 */
1541 	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
1542 	    !boot_cpu_has(X86_FEATURE_VAES) ||
1543 	    !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) ||
1544 	    !boot_cpu_has(X86_FEATURE_PCLMULQDQ) ||
1545 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
1546 		return 0;
1547 	err = crypto_register_skciphers(skcipher_algs_vaes_avx2,
1548 					ARRAY_SIZE(skcipher_algs_vaes_avx2));
1549 	if (err)
1550 		return err;
1551 
1552 	if (!boot_cpu_has(X86_FEATURE_AVX512BW) ||
1553 	    !boot_cpu_has(X86_FEATURE_AVX512VL) ||
1554 	    !boot_cpu_has(X86_FEATURE_BMI2) ||
1555 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM |
1556 			       XFEATURE_MASK_AVX512, NULL))
1557 		return 0;
1558 
1559 	err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_256,
1560 				    ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256));
1561 	if (err)
1562 		return err;
1563 
1564 	if (boot_cpu_has(X86_FEATURE_PREFER_YMM)) {
1565 		int i;
1566 
1567 		for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx512); i++)
1568 			skcipher_algs_vaes_avx512[i].base.cra_priority = 1;
1569 		for (i = 0; i < ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512); i++)
1570 			aes_gcm_algs_vaes_avx10_512[i].base.cra_priority = 1;
1571 	}
1572 
1573 	err = crypto_register_skciphers(skcipher_algs_vaes_avx512,
1574 					ARRAY_SIZE(skcipher_algs_vaes_avx512));
1575 	if (err)
1576 		return err;
1577 	err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_512,
1578 				    ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512));
1579 	if (err)
1580 		return err;
1581 
1582 	return 0;
1583 }
1584 
1585 #define unregister_skciphers(A) \
1586 	if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \
1587 		crypto_unregister_skciphers((A), ARRAY_SIZE(A))
1588 #define unregister_aeads(A) \
1589 	if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \
1590 		crypto_unregister_aeads((A), ARRAY_SIZE(A))
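
/*
 * cra_refcnt is nonzero only for algorithms that were actually registered, so
 * the helpers above make unregister_avx_algs() safe to call even when
 * register_avx_algs() returned early before registering everything.
 */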
1591 
1592 static void unregister_avx_algs(void)
1593 {
1594 	unregister_skciphers(skcipher_algs_aesni_avx);
1595 	unregister_aeads(aes_gcm_algs_aesni_avx);
1596 	unregister_skciphers(skcipher_algs_vaes_avx2);
1597 	unregister_skciphers(skcipher_algs_vaes_avx512);
1598 	unregister_aeads(aes_gcm_algs_vaes_avx10_256);
1599 	unregister_aeads(aes_gcm_algs_vaes_avx10_512);
1600 }
1601 #else /* CONFIG_X86_64 */
1602 static struct aead_alg aes_gcm_algs_aesni[0];
1603 
1604 static int __init register_avx_algs(void)
1605 {
1606 	return 0;
1607 }
1608 
1609 static void unregister_avx_algs(void)
1610 {
1611 }
1612 #endif /* !CONFIG_X86_64 */
1613 
1614 static const struct x86_cpu_id aesni_cpu_id[] = {
1615 	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
1616 	{}
1617 };
1618 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1619 
1620 static int __init aesni_init(void)
1621 {
1622 	int err;
1623 
1624 	if (!x86_match_cpu(aesni_cpu_id))
1625 		return -ENODEV;
1626 
1627 	err = crypto_register_alg(&aesni_cipher_alg);
1628 	if (err)
1629 		return err;
1630 
1631 	err = crypto_register_skciphers(aesni_skciphers,
1632 					ARRAY_SIZE(aesni_skciphers));
1633 	if (err)
1634 		goto unregister_cipher;
1635 
1636 	err = crypto_register_aeads(aes_gcm_algs_aesni,
1637 				    ARRAY_SIZE(aes_gcm_algs_aesni));
1638 	if (err)
1639 		goto unregister_skciphers;
1640 
1641 	err = register_avx_algs();
1642 	if (err)
1643 		goto unregister_avx;
1644 
1645 	return 0;
1646 
1647 unregister_avx:
1648 	unregister_avx_algs();
1649 	crypto_unregister_aeads(aes_gcm_algs_aesni,
1650 				ARRAY_SIZE(aes_gcm_algs_aesni));
1651 unregister_skciphers:
1652 	crypto_unregister_skciphers(aesni_skciphers,
1653 				    ARRAY_SIZE(aesni_skciphers));
1654 unregister_cipher:
1655 	crypto_unregister_alg(&aesni_cipher_alg);
1656 	return err;
1657 }
1658 
1659 static void __exit aesni_exit(void)
1660 {
1661 	crypto_unregister_aeads(aes_gcm_algs_aesni,
1662 				ARRAY_SIZE(aes_gcm_algs_aesni));
1663 	crypto_unregister_skciphers(aesni_skciphers,
1664 				    ARRAY_SIZE(aesni_skciphers));
1665 	crypto_unregister_alg(&aesni_cipher_alg);
1666 	unregister_avx_algs();
1667 }
1668 
1669 module_init(aesni_init);
1670 module_exit(aesni_exit);
1671 
1672 MODULE_DESCRIPTION("AES cipher and modes, optimized with AES-NI or VAES instructions");
1673 MODULE_LICENSE("GPL");
1674 MODULE_ALIAS_CRYPTO("aes");
1675