// SPDX-License-Identifier: GPL-2.0-only
/*
 * FILS AEAD for (Re)Association Request/Response frames
 * Copyright 2016, Qualcomm Atheros, Inc.
 */

#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/utils.h>

#include "ieee80211_i.h"
#include "aes_cmac.h"
#include "fils_aead.h"

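/*
 * The helpers below implement AES-SIV (RFC 5297) on top of the kernel crypto
 * API: "cmac(aes)" provides the S2V pseudorandom function and "ctr(aes)"
 * performs the actual encryption/decryption. FILS (IEEE 802.11ai) uses
 * AES-SIV to protect the (Re)Association Request/Response frames.
 */
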
static void gf_mulx(u8 *pad)
{
	u64 a = get_unaligned_be64(pad);
	u64 b = get_unaligned_be64(pad + 8);

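	/* dbl(): shift the 128-bit value left by one bit and, if the top bit
	 * was set, reduce with the polynomial used by SIV and CMAC
	 * (x^128 + x^7 + x^2 + x + 1), i.e. XOR in 0x87 at the low end.
	 */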
	put_unaligned_be64((a << 1) | (b >> 63), pad);
	put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8);
}

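/*
 * S2V as defined in RFC 5297, Section 2.4: derive a 128-bit synthetic IV from
 * the vector of inputs S1..Sn (addr[]/len[]) using AES-CMAC keyed with the
 * first half of the SIV key (already programmed into @tfm by the caller).
 */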
static int aes_s2v(struct crypto_shash *tfm,
		   size_t num_elem, const u8 *addr[], size_t len[], u8 *v)
{
	u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {};
	SHASH_DESC_ON_STACK(desc, tfm);
	size_t i;

	desc->tfm = tfm;

	/* D = AES-CMAC(K, <zero>) */
	crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d);

	for (i = 0; i < num_elem - 1; i++) {
		/* D = dbl(D) xor AES-CMAC(K, Si) */
		gf_mulx(d); /* dbl */
		crypto_shash_digest(desc, addr[i], len[i], tmp);
		crypto_xor(d, tmp, AES_BLOCK_SIZE);
	}

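	/* Handle the last vector Sn and compute the final CMAC input T */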
	crypto_shash_init(desc);

	if (len[i] >= AES_BLOCK_SIZE) {
		/* len(Sn) >= 128 */
		/* T = Sn xorend D */
		crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE);
		crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE,
			   AES_BLOCK_SIZE);
	} else {
		/* len(Sn) < 128 */
		/* T = dbl(D) xor pad(Sn) */
		gf_mulx(d); /* dbl */
		crypto_xor(d, addr[i], len[i]);
		d[len[i]] ^= 0x80;
	}
	/* V = AES-CMAC(K, T) */
	crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v);

	return 0;
}

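/*
 * AES-SIV encryption (RFC 5297): compute V = S2V(K1, AD1, .., ADn, plaintext),
 * write V to out[0..15] and follow it with the AES-CTR encryption of the
 * plaintext under K2, using V (with two bits cleared) as the initial counter
 * block.
 */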
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_encrypt(const u8 *key, size_t key_len,
			   const u8 *plain, size_t plain_len,
			   size_t num_elem, const u8 *addr[],
			   size_t len[], u8 *out)
{
	u8 v[AES_BLOCK_SIZE];
	struct crypto_shash *tfm;
	struct crypto_skcipher *tfm2;
	struct skcipher_request *req;
	int res;
	struct scatterlist src[1], dst[1];
	u8 *tmp;

	key_len /= 2; /* S2V key || CTR key */

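	/* The plaintext itself is the final S2V input vector (Sn); this is why
	 * the caller needs to leave one spare slot in addr[]/len[].
	 */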
	addr[num_elem] = plain;
	len[num_elem] = plain_len;
	num_elem++;

	/* S2V */

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* K1 for S2V */
	res = crypto_shash_setkey(tfm, key, key_len);
	if (!res)
		res = aes_s2v(tfm, num_elem, addr, len, v);
	crypto_free_shash(tfm);
	if (res)
		return res;

	/* Work on a temporary copy of the plaintext: the output buffer may be
	 * the same as the plaintext (in-place encryption), and both the SIV
	 * written below and the AES-CTR output would otherwise clobber
	 * plaintext that has not been read yet.
	 */
	tmp = kmemdup(plain, plain_len, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* IV for CTR before encrypted data */
	memcpy(out, v, AES_BLOCK_SIZE);

	/* Synthetic IV to be used as the initial counter in CTR:
	 * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
	 */
	v[8] &= 0x7f;
	v[12] &= 0x7f;
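	/* Only this local copy is masked; the unmodified V already written to
	 * the frame is what the receiver verifies against.
	 */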

	/* CTR */

	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm2)) {
		kfree(tmp);
		return PTR_ERR(tfm2);
	}
	/* K2 for CTR */
	res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
	if (res)
		goto fail;

	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
	if (!req) {
		res = -ENOMEM;
		goto fail;
	}

	sg_init_one(src, tmp, plain_len);
	sg_init_one(dst, out + AES_BLOCK_SIZE, plain_len);
	skcipher_request_set_crypt(req, src, dst, plain_len, v);
	res = crypto_skcipher_encrypt(req);
	skcipher_request_free(req);
fail:
	kfree(tmp);
	crypto_free_skcipher(tfm2);
	return res;
}

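/*
 * AES-SIV decryption (RFC 5297): iv_crypt carries the received SIV followed
 * by the ciphertext. Decrypt with AES-CTR under K2 first, then recompute S2V
 * over the AAD vectors plus the decrypted plaintext and require the result to
 * match the received SIV.
 */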
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_decrypt(const u8 *key, size_t key_len,
			   const u8 *iv_crypt, size_t iv_c_len,
			   size_t num_elem, const u8 *addr[], size_t len[],
			   u8 *out)
{
	struct crypto_shash *tfm;
	struct crypto_skcipher *tfm2;
	struct skcipher_request *req;
	struct scatterlist src[1], dst[1];
	size_t crypt_len;
	int res;
	u8 frame_iv[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE];
	u8 check[AES_BLOCK_SIZE];

	crypt_len = iv_c_len - AES_BLOCK_SIZE;
	key_len /= 2; /* S2V key || CTR key */
	addr[num_elem] = out;
	len[num_elem] = crypt_len;
	num_elem++;

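	/* Keep two copies of the received SIV: frame_iv stays unmasked for the
	 * final S2V comparison, while iv is masked below and used as the
	 * initial CTR counter block.
	 */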
	memcpy(iv, iv_crypt, AES_BLOCK_SIZE);
	memcpy(frame_iv, iv_crypt, AES_BLOCK_SIZE);

	/* Synthetic IV to be used as the initial counter in CTR:
	 * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
	 */
	iv[8] &= 0x7f;
	iv[12] &= 0x7f;

	/* CTR */

	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm2))
		return PTR_ERR(tfm2);
	/* K2 for CTR */
	res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
	if (res) {
		crypto_free_skcipher(tfm2);
		return res;
	}

	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm2);
		return -ENOMEM;
	}

	sg_init_one(src, iv_crypt + AES_BLOCK_SIZE, crypt_len);
	sg_init_one(dst, out, crypt_len);
	skcipher_request_set_crypt(req, src, dst, crypt_len, iv);
	res = crypto_skcipher_decrypt(req);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm2);
	if (res)
		return res;

	/* S2V */

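	/* Recompute S2V over the AAD vectors and the decrypted plaintext; the
	 * result must match the SIV received at the start of the frame.
	 */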
	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* K1 for S2V */
	res = crypto_shash_setkey(tfm, key, key_len);
	if (!res)
		res = aes_s2v(tfm, num_elem, addr, len, check);
	crypto_free_shash(tfm);
	if (res)
		return res;
	if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0)
		return -EINVAL;
	return 0;
}

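/*
 * Encrypt the FILS-protected part of a (Re)Association Request in place:
 * everything after the FILS Session element is encrypted with AES-SIV and the
 * frame grows by the 16-octet SIV, which is placed in front of the encrypted
 * data.
 */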
int fils_encrypt_assoc_req(struct sk_buff *skb,
			   struct ieee80211_mgd_assoc_data *assoc_data)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	u8 *capab, *ies, *encr;
	const u8 *addr[5 + 1];
	const struct element *session;
	size_t len[5 + 1];
	size_t crypt_len;

	if (ieee80211_is_reassoc_req(mgmt->frame_control)) {
		capab = (u8 *)&mgmt->u.reassoc_req.capab_info;
		ies = mgmt->u.reassoc_req.variable;
	} else {
		capab = (u8 *)&mgmt->u.assoc_req.capab_info;
		ies = mgmt->u.assoc_req.variable;
	}

	session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION,
					 ies, skb->data + skb->len - ies);
	if (!session || session->datalen != 1 + 8)
		return -EINVAL;
	/* encrypt after FILS Session element */
	encr = (u8 *)session->data + 1 + 8;

	/* AES-SIV AAD vectors */

	/* The STA's MAC address */
	addr[0] = mgmt->sa;
	len[0] = ETH_ALEN;
	/* The AP's BSSID */
	addr[1] = mgmt->da;
	len[1] = ETH_ALEN;
	/* The STA's nonce */
	addr[2] = assoc_data->fils_nonces;
	len[2] = FILS_NONCE_LEN;
	/* The AP's nonce */
	addr[3] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
	len[3] = FILS_NONCE_LEN;
	/* The (Re)Association Request frame from the Capability Information
	 * field to the FILS Session element (both inclusive).
	 */
	addr[4] = capab;
	len[4] = encr - capab;

	crypt_len = skb->data + skb->len - encr;
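	/* Reserve room for the 16-octet SIV that will precede the encrypted
	 * data in the frame.
	 */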
	skb_put(skb, AES_BLOCK_SIZE);
	return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
			       encr, crypt_len, 5, addr, len, encr);
}

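/*
 * Verify and decrypt the FILS-protected part of a (Re)Association Response in
 * place. On success the 16-octet SIV is removed and *frame_len is reduced
 * accordingly.
 */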
int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
			    u8 *frame, size_t *frame_len,
			    struct ieee80211_mgd_assoc_data *assoc_data)
{
	struct ieee80211_mgmt *mgmt = (void *)frame;
	u8 *capab, *ies, *encr;
	const u8 *addr[5 + 1];
	const struct element *session;
	size_t len[5 + 1];
	int res;
	size_t crypt_len;

	if (*frame_len < 24 + 6)
		return -EINVAL;

	capab = (u8 *)&mgmt->u.assoc_resp.capab_info;
	ies = mgmt->u.assoc_resp.variable;
	session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION,
					 ies, frame + *frame_len - ies);
	if (!session || session->datalen != 1 + 8) {
		mlme_dbg(sdata,
			 "No (valid) FILS Session element in (Re)Association Response frame from %pM",
			 mgmt->sa);
		return -EINVAL;
	}
	/* decrypt after FILS Session element */
	encr = (u8 *)session->data + 1 + 8;

	/* AES-SIV AAD vectors */

	/* The AP's BSSID */
	addr[0] = mgmt->sa;
	len[0] = ETH_ALEN;
	/* The STA's MAC address */
	addr[1] = mgmt->da;
	len[1] = ETH_ALEN;
	/* The AP's nonce */
	addr[2] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
	len[2] = FILS_NONCE_LEN;
	/* The STA's nonce */
	addr[3] = assoc_data->fils_nonces;
	len[3] = FILS_NONCE_LEN;
	/* The (Re)Association Response frame from the Capability Information
	 * field to the FILS Session element (both inclusive).
	 */
	addr[4] = capab;
	len[4] = encr - capab;

	crypt_len = frame + *frame_len - encr;
	if (crypt_len < AES_BLOCK_SIZE) {
		mlme_dbg(sdata,
			 "Not enough room for AES-SIV data after FILS Session element in (Re)Association Response frame from %pM",
			 mgmt->sa);
		return -EINVAL;
	}
	res = aes_siv_decrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
			      encr, crypt_len, 5, addr, len, encr);
	if (res != 0) {
		mlme_dbg(sdata,
			 "AES-SIV decryption of (Re)Association Response frame from %pM failed",
			 mgmt->sa);
		return res;
	}
	*frame_len -= AES_BLOCK_SIZE;
	return 0;
}