/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AES block cipher, optimized for ARM64
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 * Copyright 2026 Google LLC
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/unaligned.h>
#include <linux/cpufeature.h>

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_aes);

struct aes_block {
	u8 b[AES_BLOCK_SIZE];
};

asmlinkage void __aes_arm64_encrypt(const u32 rk[], u8 out[AES_BLOCK_SIZE],
				    const u8 in[AES_BLOCK_SIZE], int rounds);
asmlinkage void __aes_arm64_decrypt(const u32 inv_rk[], u8 out[AES_BLOCK_SIZE],
				    const u8 in[AES_BLOCK_SIZE], int rounds);
asmlinkage void __aes_ce_encrypt(const u32 rk[], u8 out[AES_BLOCK_SIZE],
				 const u8 in[AES_BLOCK_SIZE], int rounds);
asmlinkage void __aes_ce_decrypt(const u32 inv_rk[], u8 out[AES_BLOCK_SIZE],
				 const u8 in[AES_BLOCK_SIZE], int rounds);
asmlinkage u32 __aes_ce_sub(u32 l);
asmlinkage void __aes_ce_invert(struct aes_block *out,
				const struct aes_block *in);
asmlinkage void neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
				    size_t blocks, u8 dg[], int enc_before,
				    int enc_after);
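
/*
 * Assumed prototype for the Crypto Extensions MAC helper used further down;
 * it is taken to mirror the NEON variant above.
 */
asmlinkage void ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
				  size_t blocks, u8 dg[], int enc_before,
				  int enc_after);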

/*
 * Expand an AES key, using the ARMv8 Crypto Extensions if they are supported
 * and usable, or generic code otherwise.  The expanded key format is
 * compatible between the two cases.  The outputs are @rndkeys (required) and
 * @inv_rndkeys (optional, may be NULL).
 */
static void aes_expandkey_arm64(u32 rndkeys[], u32 *inv_rndkeys,
				const u8 *in_key, int key_len, int nrounds)
{
	/*
	 * The AES key schedule round constants
	 */
	static u8 const rcon[] = {
		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
	};

	u32 kwords = key_len / sizeof(u32);
	struct aes_block *key_enc, *key_dec;
	int i, j;

	if (!static_branch_likely(&have_aes) || unlikely(!may_use_simd())) {
		aes_expandkey_generic(rndkeys, inv_rndkeys, in_key, key_len);
		return;
	}

	for (i = 0; i < kwords; i++)
		rndkeys[i] = get_unaligned_le32(&in_key[i * sizeof(u32)]);

	scoped_ksimd() {
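		/*
		 * __aes_ce_sub() performs SubWord() using the AESE
		 * instruction, which is why this runs under kernel-mode
		 * NEON.  As SubWord() and RotWord() commute, the
		 * ror32(..., 8) below applies RotWord() to the little-endian
		 * key words.
		 */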
		for (i = 0; i < sizeof(rcon); i++) {
			u32 *rki = &rndkeys[i * kwords];
			u32 *rko = rki + kwords;

			rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^
				 rcon[i] ^ rki[0];
			rko[1] = rko[0] ^ rki[1];
			rko[2] = rko[1] ^ rki[2];
			rko[3] = rko[2] ^ rki[3];

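			/*
			 * AES-128 consumes all ten iterations, four words
			 * each.  AES-192 and AES-256 generate six or eight
			 * words per iteration, so they finish early: their
			 * final iteration only needs rko[0..3] to complete
			 * the 52- or 60-word schedule, hence the breaks
			 * below.
			 */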
			if (key_len == AES_KEYSIZE_192) {
				if (i >= 7)
					break;
				rko[4] = rko[3] ^ rki[4];
				rko[5] = rko[4] ^ rki[5];
			} else if (key_len == AES_KEYSIZE_256) {
				if (i >= 6)
					break;
				rko[4] = __aes_ce_sub(rko[3]) ^ rki[4];
				rko[5] = rko[4] ^ rki[5];
				rko[6] = rko[5] ^ rki[6];
				rko[7] = rko[6] ^ rki[7];
			}
		}

		/*
		 * Generate the decryption keys for the Equivalent Inverse
		 * Cipher.  This involves reversing the order of the round
		 * keys, and applying the Inverse Mix Columns transformation on
		 * all but the first and the last one.
		 */
		if (inv_rndkeys) {
			key_enc = (struct aes_block *)rndkeys;
			key_dec = (struct aes_block *)inv_rndkeys;
			j = nrounds;

			key_dec[0] = key_enc[j];
			for (i = 1, j--; j > 0; i++, j--)
				__aes_ce_invert(key_dec + i, key_enc + j);
			key_dec[i] = key_enc[0];
		}
	}
}

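/*
 * Key expansion hook for the AES library: @inv_k is NULL when only the
 * encryption round keys are needed.
 */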
static void aes_preparekey_arch(union aes_enckey_arch *k,
				union aes_invkey_arch *inv_k,
				const u8 *in_key, int key_len, int nrounds)
{
	aes_expandkey_arm64(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL,
			    in_key, key_len, nrounds);
}

/*
 * This is here temporarily until the remaining AES mode implementations are
 * migrated from arch/arm64/crypto/ to lib/crypto/arm64/.
 */
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
		     unsigned int key_len)
{
	if (aes_check_keylen(key_len) != 0)
		return -EINVAL;
	ctx->key_length = key_len;
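	/* nrounds: 10, 12 or 14 for 128-, 192- and 256-bit keys respectively */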
	aes_expandkey_arm64(ctx->key_enc, ctx->key_dec, in_key, key_len,
			    6 + key_len / 4);
	return 0;
}
EXPORT_SYMBOL(ce_aes_expandkey);

EXPORT_SYMBOL_NS_GPL(neon_aes_ecb_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_ecb_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_cbc_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_cbc_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_cbc_cts_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_cbc_cts_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_ctr_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_xctr_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_xts_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_xts_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_essiv_cbc_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(neon_aes_essiv_cbc_decrypt, "CRYPTO_INTERNAL");

EXPORT_SYMBOL_NS_GPL(ce_aes_ecb_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_ecb_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_cbc_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_cbc_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_cbc_cts_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_cbc_cts_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_ctr_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_xctr_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_xts_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_xts_decrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_essiv_cbc_encrypt, "CRYPTO_INTERNAL");
EXPORT_SYMBOL_NS_GPL(ce_aes_essiv_cbc_decrypt, "CRYPTO_INTERNAL");
#if IS_MODULE(CONFIG_CRYPTO_AES_ARM64_CE_CCM)
EXPORT_SYMBOL_NS_GPL(ce_aes_mac_update, "CRYPTO_INTERNAL");
#endif

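/*
 * Single-block encryption/decryption: use the Crypto Extensions under
 * kernel-mode NEON when possible, otherwise fall back to the scalar
 * __aes_arm64_*() routines.  Both consume the same expanded key layout.
 */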
static void aes_encrypt_arch(const struct aes_enckey *key,
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	if (static_branch_likely(&have_aes) && likely(may_use_simd())) {
		scoped_ksimd()
			__aes_ce_encrypt(key->k.rndkeys, out, in, key->nrounds);
	} else {
		__aes_arm64_encrypt(key->k.rndkeys, out, in, key->nrounds);
	}
}

static void aes_decrypt_arch(const struct aes_key *key,
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	if (static_branch_likely(&have_aes) && likely(may_use_simd())) {
		scoped_ksimd()
			__aes_ce_decrypt(key->inv_k.inv_rndkeys, out, in,
					 key->nrounds);
	} else {
		__aes_arm64_decrypt(key->inv_k.inv_rndkeys, out, in,
				    key->nrounds);
	}
}

#if IS_ENABLED(CONFIG_CRYPTO_LIB_AES_CBC_MACS)
#define aes_cbcmac_blocks_arch aes_cbcmac_blocks_arch
static bool aes_cbcmac_blocks_arch(u8 h[AES_BLOCK_SIZE],
				   const struct aes_enckey *key, const u8 *data,
				   size_t nblocks, bool enc_before,
				   bool enc_after)
{
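	/*
	 * Prefer the Crypto Extensions MAC routine; CPUs with ASIMD but no
	 * AES instructions use the plain NEON one instead.  Returning false
	 * lets the caller fall back to the generic implementation.
	 */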
	if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
		scoped_ksimd() {
			if (static_branch_likely(&have_aes))
				ce_aes_mac_update(data, key->k.rndkeys,
						  key->nrounds, nblocks, h,
						  enc_before, enc_after);
			else
				neon_aes_mac_update(data, key->k.rndkeys,
						    key->nrounds, nblocks, h,
						    enc_before, enc_after);
		}
		return true;
	}
	return false;
}
#endif /* CONFIG_CRYPTO_LIB_AES_CBC_MACS */

#define aes_mod_init_arch aes_mod_init_arch
static void aes_mod_init_arch(void)
{
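	/*
	 * The AES instructions operate on the Advanced SIMD registers, so
	 * only probe for them once ASIMD itself is known to be present.
	 */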
	if (cpu_have_named_feature(ASIMD)) {
		static_branch_enable(&have_neon);
		if (cpu_have_named_feature(AES))
			static_branch_enable(&have_aes);
	}
}
220