// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd.
 * Copyright (C) 2024 Google LLC
 *
 * Author: Ard Biesheuvel <ardb@kernel.org>
 */

#include <linux/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include <asm/simd.h>

#include "aes-ce-setkey.h"

MODULE_IMPORT_NS("CRYPTO_INTERNAL");

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

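/*
 * Helpers implemented in the accompanying assembly code. The summaries
 * below are inferred from how the callers in this file use them.
 *
 * ce_aes_mac_update() folds 'blocks' full blocks of 'in' into the CBC-MAC
 * state in dg[], encrypting the state before and/or after the update as
 * requested, and returns the number of blocks left unprocessed.
 *
 * ce_aes_ccm_{en,de}crypt() CTR-process 'cbytes' bytes while updating the
 * CBC-MAC in mac[]; when 'final_iv' is non-NULL it is the original counter
 * block, used to turn the final MAC into the tag.
 */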
asmlinkage u32 ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
				 int blocks, u8 dg[], int enc_before,
				 int enc_after);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[], u8 const final_iv[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[], u8 const final_iv[]);

static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);

	return ce_aes_expandkey(ctx, in_key, key_len);
}

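/*
 * RFC 3610 permits only the even tag lengths 4, 6, ..., 16; the upper
 * bound is enforced separately via .maxauthsize below.
 */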
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	if ((authsize & 1) || authsize < 4)
		return -EINVAL;
	return 0;
}

static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	u32 l = req->iv[0] + 1;

	/* verify that CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/* verify that msglen can in fact be represented in L bytes */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

	/*
	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *                (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
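	/*
	 * Worked example (illustrative, not from the spec text): with a
	 * 12-byte tag and AAD present, this sets byte 0 to
	 * 0x40 | ((12 - 2) << 2) = 0x68, on top of the L - 1 value the
	 * caller already placed in bits 0..2 of the IV.
	 */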
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}

static u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				u32 macp, u32 const rk[], u32 rounds)
{
	int enc_after = (macp + abytes) % AES_BLOCK_SIZE;

	do {
		u32 blocks = abytes / AES_BLOCK_SIZE;

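		/*
		 * Pass whole blocks to the asm helper while the collector
		 * block is either empty or full; otherwise, fill it up
		 * bytewise until a full block is available.
		 */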
		if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) {
			u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac,
						    macp, enc_after);
			u32 adv = (blocks - rem) * AES_BLOCK_SIZE;

			macp = enc_after ? 0 : AES_BLOCK_SIZE;
			in += adv;
			abytes -= adv;

			if (unlikely(rem))
				macp = 0;
		} else {
			u32 l = min(AES_BLOCK_SIZE - macp, abytes);

			crypto_xor(&mac[macp], in, l);
			in += l;
			macp += l;
			abytes -= l;
		}
	} while (abytes > 0);

	return macp;
}

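/*
 * MAC the associated data. Per RFC 3610 (section 2.2), AAD shorter than
 * 0xff00 bytes is prefixed with its length as two octets; longer AAD gets
 * the marker 0xff 0xfe followed by a four-octet length.
 */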
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = AES_BLOCK_SIZE;

	/* prepend the AAD with a length tag */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	macp = ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, macp,
				    ctx->key_enc, num_rounds(ctx));
	scatterwalk_start(&walk, req->src);

	do {
		unsigned int n;

		n = scatterwalk_next(&walk, len);
		macp = ce_aes_ccm_auth_data(mac, walk.addr, n, macp,
					    ctx->key_enc, num_rounds(ctx));
		scatterwalk_done_src(&walk, n);
		len -= n;
	} while (len);
}

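/*
 * Encrypt: MAC the lengths/nonce block and any AAD, then CTR-encrypt the
 * plaintext while folding it into the CBC-MAC, and finally append the
 * encrypted MAC as the tag.
 */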
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 orig_iv[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	/* preserve the original iv for the final round */
	memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);
	if (unlikely(err))
		return err;

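	/* scoped_ksimd() grants kernel-mode NEON for the enclosed block */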
	scoped_ksimd() {
		if (req->assoclen)
			ccm_calculate_auth_mac(req, mac);

		do {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			u8 buf[AES_BLOCK_SIZE];
			u8 *final_iv = NULL;

			if (walk.nbytes == walk.total) {
				tail = 0;
				final_iv = orig_iv;
			}

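			/*
			 * Stage a short trailing fragment at the end of a
			 * stack buffer so the asm code can consume it as a
			 * whole block; the result is copied back out below.
			 */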
			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
				src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
						   src, walk.nbytes);

			ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
					   ctx->key_enc, num_rounds(ctx),
					   mac, walk.iv, final_iv);

			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
				memcpy(walk.dst.virt.addr, dst, walk.nbytes);

			if (walk.nbytes)
				err = skcipher_walk_done(&walk, tail);
		} while (walk.nbytes);
	}

	if (unlikely(err))
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}

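/*
 * Decrypt: CTR-decrypt the payload while folding the recovered plaintext
 * into the CBC-MAC, then verify the tag against the one stored in the
 * source buffer.
 */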
static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 orig_iv[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	/* preserve the original iv for the final round */
	memcpy(orig_iv, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, false);
	if (unlikely(err))
		return err;

	scoped_ksimd() {
		if (req->assoclen)
			ccm_calculate_auth_mac(req, mac);

		do {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			u8 buf[AES_BLOCK_SIZE];
			u8 *final_iv = NULL;

			if (walk.nbytes == walk.total) {
				tail = 0;
				final_iv = orig_iv;
			}

			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
				src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
						   src, walk.nbytes);

			ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
					   ctx->key_enc, num_rounds(ctx),
					   mac, walk.iv, final_iv);

			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
				memcpy(walk.dst.virt.addr, dst, walk.nbytes);

			if (walk.nbytes)
				err = skcipher_walk_done(&walk, tail);
		} while (walk.nbytes);
	}

	if (unlikely(err))
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(orig_iv, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

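	/* crypto_memneq() compares in constant time to avoid leaking the tag */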
	if (crypto_memneq(mac, orig_iv, authsize))
		return -EBADMSG;
	return 0;
}

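/*
 * CCM behaves as a stream mode, so cra_blocksize is 1; chunksize conveys
 * the underlying CTR block granularity to the walk code.
 */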
static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};

static int __init aes_mod_init(void)
{
	if (!cpu_have_named_feature(AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");