xref: /linux/drivers/net/ovpn/crypto_aead.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*  OpenVPN data channel offload
3  *
4  *  Copyright (C) 2020-2025 OpenVPN, Inc.
5  *
6  *  Author:	James Yonan <james@openvpn.net>
7  *		Antonio Quartulli <antonio@openvpn.net>
8  */
9 
10 #include <crypto/aead.h>
11 #include <linux/skbuff.h>
12 #include <net/ip.h>
13 #include <net/ipv6.h>
14 #include <net/udp.h>
15 
16 #include "ovpnpriv.h"
17 #include "main.h"
18 #include "io.h"
19 #include "pktid.h"
20 #include "crypto_aead.h"
21 #include "crypto.h"
22 #include "peer.h"
23 #include "proto.h"
24 #include "skb.h"
25 
26 #define OVPN_AUTH_TAG_SIZE	16
27 #define OVPN_AAD_SIZE		(OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE)
28 
29 #define ALG_NAME_AES		"gcm(aes)"
30 #define ALG_NAME_CHACHAPOLY	"rfc7539(chacha20,poly1305)"
31 
32 static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
33 {
34 	return  OVPN_OPCODE_SIZE +			/* OP header size */
35 		sizeof(u32) +				/* Packet ID */
36 		crypto_aead_authsize(ks->encrypt);	/* Auth Tag */
37 }
38 
39 /**
40  * ovpn_aead_crypto_tmp_size - compute the size of a temporary object containing
41  *			       an AEAD request structure with extra space for SG
42  *			       and IV.
43  * @tfm: the AEAD cipher handle
44  * @nfrags: the number of fragments in the skb
45  *
46  * This function calculates the size of a contiguous memory block that includes
47  * the initialization vector (IV), the AEAD request, and an array of scatterlist
48  * entries. For alignment considerations, the IV is placed first, followed by
49  * the request, and then the scatterlist.
50  * Additional alignment is applied according to the requirements of the
51  * underlying structures.
52  *
53  * Return: the size of the temporary memory that needs to be allocated
54  */
55 static unsigned int ovpn_aead_crypto_tmp_size(struct crypto_aead *tfm,
56 					      const unsigned int nfrags)
57 {
58 	unsigned int len = OVPN_NONCE_SIZE;
59 
60 	DEBUG_NET_WARN_ON_ONCE(crypto_aead_ivsize(tfm) != OVPN_NONCE_SIZE);
61 
62 	/* min size for a buffer of ivsize, aligned to alignmask */
63 	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
64 	/* round up to the next multiple of the crypto ctx alignment */
65 	len = ALIGN(len, crypto_tfm_ctx_alignment());
66 
67 	/* reserve space for the AEAD request */
68 	len += sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
69 	/* round up to the next multiple of the scatterlist alignment */
70 	len = ALIGN(len, __alignof__(struct scatterlist));
71 
72 	/* add enough space for nfrags + 2 scatterlist entries */
73 	len += array_size(sizeof(struct scatterlist), nfrags + 2);
74 	return len;
75 }
76 
77 /**
78  * ovpn_aead_crypto_tmp_iv - retrieve the pointer to the IV within a temporary
79  *			     buffer allocated using ovpn_aead_crypto_tmp_size
80  * @aead: the AEAD cipher handle
81  * @tmp: a pointer to the beginning of the temporary buffer
82  *
83  * This function retrieves a pointer to the initialization vector (IV) in the
84  * temporary buffer. If the AEAD cipher specifies an IV size, the pointer is
85  * adjusted using the AEAD's alignment mask to ensure proper alignment.
86  *
87  * Returns: a pointer to the IV within the temporary buffer
88  */
89 static u8 *ovpn_aead_crypto_tmp_iv(struct crypto_aead *aead, void *tmp)
90 {
91 	return likely(crypto_aead_ivsize(aead)) ?
92 		      PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) :
93 		      tmp;
94 }
95 
96 /**
97  * ovpn_aead_crypto_tmp_req - retrieve the pointer to the AEAD request structure
98  *			      within a temporary buffer allocated using
99  *			      ovpn_aead_crypto_tmp_size
100  * @aead: the AEAD cipher handle
101  * @iv: a pointer to the initialization vector in the temporary buffer
102  *
103  * This function computes the location of the AEAD request structure that
104  * immediately follows the IV in the temporary buffer and it ensures the request
105  * is aligned to the crypto transform context alignment.
106  *
107  * Returns: a pointer to the AEAD request structure
108  */
109 static struct aead_request *ovpn_aead_crypto_tmp_req(struct crypto_aead *aead,
110 						     const u8 *iv)
111 {
112 	return (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
113 				 crypto_tfm_ctx_alignment());
114 }
115 
116 /**
117  * ovpn_aead_crypto_req_sg - locate the scatterlist following the AEAD request
118  *			     within a temporary buffer allocated using
119  *			     ovpn_aead_crypto_tmp_size
120  * @aead: the AEAD cipher handle
121  * @req: a pointer to the AEAD request structure in the temporary buffer
122  *
123  * This function computes the starting address of the scatterlist that is
124  * allocated immediately after the AEAD request structure. It aligns the pointer
125  * based on the alignment requirements of the scatterlist structure.
126  *
127  * Returns: a pointer to the scatterlist
128  */
129 static struct scatterlist *ovpn_aead_crypto_req_sg(struct crypto_aead *aead,
130 						   struct aead_request *req)
131 {
132 	return (void *)ALIGN((unsigned long)(req + 1) +
133 			     crypto_aead_reqsize(aead),
134 			     __alignof__(struct scatterlist));
135 }
136 
137 int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
138 		      struct sk_buff *skb)
139 {
140 	const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
141 	struct aead_request *req;
142 	struct sk_buff *trailer;
143 	struct scatterlist *sg;
144 	int nfrags, ret;
145 	u32 pktid, op;
146 	void *tmp;
147 	u8 *iv;
148 
149 	ovpn_skb_cb(skb)->peer = peer;
150 	ovpn_skb_cb(skb)->ks = ks;
151 
152 	/* Sample AEAD header format:
153 	 * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
154 	 * [ OP32 ] [seq # ] [             auth tag            ] [ payload ... ]
155 	 *          [4-byte
156 	 *          IV head]
157 	 */
158 
159 	/* check that there's enough headroom in the skb for packet
160 	 * encapsulation
161 	 */
162 	if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM)))
163 		return -ENOBUFS;
164 
165 	/* get number of skb frags and ensure that packet data is writable */
166 	nfrags = skb_cow_data(skb, 0, &trailer);
167 	if (unlikely(nfrags < 0))
168 		return nfrags;
169 
170 	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
171 		return -ENOSPC;
172 
173 	/* allocate temporary memory for iv, sg and req */
174 	tmp = kmalloc(ovpn_aead_crypto_tmp_size(ks->encrypt, nfrags),
175 		      GFP_ATOMIC);
176 	if (unlikely(!tmp))
177 		return -ENOMEM;
178 
179 	ovpn_skb_cb(skb)->crypto_tmp = tmp;
180 
181 	iv = ovpn_aead_crypto_tmp_iv(ks->encrypt, tmp);
182 	req = ovpn_aead_crypto_tmp_req(ks->encrypt, iv);
183 	sg = ovpn_aead_crypto_req_sg(ks->encrypt, req);
184 
185 	/* sg table:
186 	 * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE),
187 	 * 1, 2, 3, ..., n: payload,
188 	 * n+1: auth_tag (len=tag_size)
189 	 */
190 	sg_init_table(sg, nfrags + 2);
191 
192 	/* build scatterlist to encrypt packet payload */
193 	ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
194 	if (unlikely(ret < 0)) {
195 		netdev_err(peer->ovpn->dev,
196 			   "encrypt: cannot map skb to sg: %d\n", ret);
197 		return ret;
198 	}
199 
200 	/* append auth_tag onto scatterlist */
201 	__skb_push(skb, tag_size);
202 	sg_set_buf(sg + ret + 1, skb->data, tag_size);
203 
204 	/* obtain packet ID, which is used both as a first
205 	 * 4 bytes of nonce and last 4 bytes of associated data.
206 	 */
207 	ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
208 	if (unlikely(ret < 0))
209 		return ret;
210 
211 	/* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
212 	 * nonce
213 	 */
214 	ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);
215 
216 	/* make space for packet id and push it to the front */
217 	__skb_push(skb, OVPN_NONCE_WIRE_SIZE);
218 	memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);
219 
220 	/* add packet op as head of additional data */
221 	op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->tx_id);
222 	__skb_push(skb, OVPN_OPCODE_SIZE);
223 	BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
224 	*((__force __be32 *)skb->data) = htonl(op);
225 
226 	/* AEAD Additional data */
227 	sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
228 
229 	/* setup async crypto operation */
230 	aead_request_set_tfm(req, ks->encrypt);
231 	aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
232 	aead_request_set_crypt(req, sg, sg,
233 			       skb->len - ovpn_aead_encap_overhead(ks), iv);
234 	aead_request_set_ad(req, OVPN_AAD_SIZE);
235 
236 	/* encrypt it */
237 	return crypto_aead_encrypt(req);
238 }
239 
/**
 * ovpn_aead_decrypt - authenticate and decrypt an incoming data channel packet
 * @peer: the peer this packet was received from
 * @ks: the key slot (cipher handles + packet-ID state) to decrypt with
 * @skb: the received packet in on-wire format (opcode word, 4-byte packet
 *	 ID, auth tag, ciphertext); decrypted in place
 *
 * The crypto operation is asynchronous: ovpn_decrypt_post() runs on
 * completion and consumes the payload_offset/peer/ks/crypto_tmp fields
 * stashed in the skb control block here.
 *
 * Return: the crypto_aead_decrypt() result (possibly -EINPROGRESS/-EBUSY
 *	   for an in-flight async request) or a negative error on setup
 *	   failure
 */
int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
		      struct sk_buff *skb)
{
	const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
	int ret, payload_len, nfrags;
	unsigned int payload_offset;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	void *tmp;
	u8 *iv;

	/* the ciphertext payload starts after the AAD (opcode + wire nonce)
	 * and the authentication tag
	 */
	payload_offset = OVPN_AAD_SIZE + tag_size;
	payload_len = skb->len - payload_offset;

	/* state consumed later by ovpn_decrypt_post() */
	ovpn_skb_cb(skb)->payload_offset = payload_offset;
	ovpn_skb_cb(skb)->peer = peer;
	ovpn_skb_cb(skb)->ks = ks;

	/* sanity check on packet size, payload size must be >= 0 */
	if (unlikely(payload_len < 0))
		return -EINVAL;

	/* Prepare the skb data buffer to be accessed up until the auth tag.
	 * This is required because this area is directly mapped into the sg
	 * list.
	 */
	if (unlikely(!pskb_may_pull(skb, payload_offset)))
		return -ENODATA;

	/* get number of skb frags and ensure that packet data is writable */
	nfrags = skb_cow_data(skb, 0, &trailer);
	if (unlikely(nfrags < 0))
		return nfrags;

	/* the sg table below cannot hold more than nfrags + 2 entries */
	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
		return -ENOSPC;

	/* allocate temporary memory for iv, sg and req; freed by the
	 * completion path via the control block pointer set below
	 */
	tmp = kmalloc(ovpn_aead_crypto_tmp_size(ks->decrypt, nfrags),
		      GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	ovpn_skb_cb(skb)->crypto_tmp = tmp;

	/* carve the scratch buffer into IV, request and scatterlist */
	iv = ovpn_aead_crypto_tmp_iv(ks->decrypt, tmp);
	req = ovpn_aead_crypto_tmp_req(ks->decrypt, iv);
	sg = ovpn_aead_crypto_req_sg(ks->decrypt, req);

	/* sg table:
	 * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
	 * 1, 2, 3, ..., n: payload,
	 * n+1: auth_tag (len=tag_size)
	 */
	sg_init_table(sg, nfrags + 2);

	/* packet op is head of additional data */
	sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);

	/* build scatterlist to decrypt packet payload */
	ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
	if (unlikely(ret < 0)) {
		netdev_err(peer->ovpn->dev,
			   "decrypt: cannot map skb to sg: %d\n", ret);
		return ret;
	}

	/* append auth_tag onto scatterlist */
	sg_set_buf(sg + ret + 1, skb->data + OVPN_AAD_SIZE, tag_size);

	/* copy nonce into IV buffer: 4-byte wire nonce from the packet
	 * followed by the locally stored 8-byte nonce tail
	 */
	memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
	memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
	       OVPN_NONCE_TAIL_SIZE);

	/* setup async crypto operation */
	aead_request_set_tfm(req, ks->decrypt);
	aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
	/* cryptlen covers ciphertext plus tag, as required by the AEAD API */
	aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);

	aead_request_set_ad(req, OVPN_AAD_SIZE);

	/* decrypt it */
	return crypto_aead_decrypt(req);
}
326 
327 /* Initialize a struct crypto_aead object */
328 static struct crypto_aead *ovpn_aead_init(const char *title,
329 					  const char *alg_name,
330 					  const unsigned char *key,
331 					  unsigned int keylen)
332 {
333 	struct crypto_aead *aead;
334 	int ret;
335 
336 	aead = crypto_alloc_aead(alg_name, 0, 0);
337 	if (IS_ERR(aead)) {
338 		ret = PTR_ERR(aead);
339 		pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
340 		aead = NULL;
341 		goto error;
342 	}
343 
344 	ret = crypto_aead_setkey(aead, key, keylen);
345 	if (ret) {
346 		pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
347 		       keylen, ret);
348 		goto error;
349 	}
350 
351 	ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
352 	if (ret) {
353 		pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
354 		       ret);
355 		goto error;
356 	}
357 
358 	/* basic AEAD assumption
359 	 * all current algorithms use OVPN_NONCE_SIZE.
360 	 * ovpn_aead_crypto_tmp_size and ovpn_aead_encrypt/decrypt
361 	 * expect this.
362 	 */
363 	if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
364 		pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
365 		ret = -EINVAL;
366 		goto error;
367 	}
368 
369 	pr_debug("********* Cipher %s (%s)\n", alg_name, title);
370 	pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
371 	pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
372 	pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
373 	pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
374 	pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));
375 
376 	return aead;
377 
378 error:
379 	crypto_free_aead(aead);
380 	return ERR_PTR(ret);
381 }
382 
383 void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
384 {
385 	if (!ks)
386 		return;
387 
388 	crypto_free_aead(ks->encrypt);
389 	crypto_free_aead(ks->decrypt);
390 	kfree(ks);
391 }
392 
393 struct ovpn_crypto_key_slot *
394 ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
395 {
396 	struct ovpn_crypto_key_slot *ks = NULL;
397 	const char *alg_name;
398 	int ret;
399 
400 	/* validate crypto alg */
401 	switch (kc->cipher_alg) {
402 	case OVPN_CIPHER_ALG_AES_GCM:
403 		alg_name = ALG_NAME_AES;
404 		break;
405 	case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
406 		alg_name = ALG_NAME_CHACHAPOLY;
407 		break;
408 	default:
409 		return ERR_PTR(-EOPNOTSUPP);
410 	}
411 
412 	if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE ||
413 	    kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE)
414 		return ERR_PTR(-EINVAL);
415 
416 	/* build the key slot */
417 	ks = kmalloc_obj(*ks);
418 	if (!ks)
419 		return ERR_PTR(-ENOMEM);
420 
421 	ks->encrypt = NULL;
422 	ks->decrypt = NULL;
423 	kref_init(&ks->refcount);
424 	ks->key_id = kc->key_id;
425 
426 	ks->encrypt = ovpn_aead_init("encrypt", alg_name,
427 				     kc->encrypt.cipher_key,
428 				     kc->encrypt.cipher_key_size);
429 	if (IS_ERR(ks->encrypt)) {
430 		ret = PTR_ERR(ks->encrypt);
431 		ks->encrypt = NULL;
432 		goto destroy_ks;
433 	}
434 
435 	ks->decrypt = ovpn_aead_init("decrypt", alg_name,
436 				     kc->decrypt.cipher_key,
437 				     kc->decrypt.cipher_key_size);
438 	if (IS_ERR(ks->decrypt)) {
439 		ret = PTR_ERR(ks->decrypt);
440 		ks->decrypt = NULL;
441 		goto destroy_ks;
442 	}
443 
444 	memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail,
445 	       OVPN_NONCE_TAIL_SIZE);
446 	memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail,
447 	       OVPN_NONCE_TAIL_SIZE);
448 
449 	/* init packet ID generation/validation */
450 	ovpn_pktid_xmit_init(&ks->pid_xmit);
451 	ovpn_pktid_recv_init(&ks->pid_recv);
452 
453 	return ks;
454 
455 destroy_ks:
456 	ovpn_aead_crypto_key_slot_destroy(ks);
457 	return ERR_PTR(ret);
458 }
459 
460 enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks)
461 {
462 	const char *alg_name;
463 
464 	if (!ks->encrypt)
465 		return OVPN_CIPHER_ALG_NONE;
466 
467 	alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt));
468 
469 	if (!strcmp(alg_name, ALG_NAME_AES))
470 		return OVPN_CIPHER_ALG_AES_GCM;
471 	else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY))
472 		return OVPN_CIPHER_ALG_CHACHA20_POLY1305;
473 	else
474 		return OVPN_CIPHER_ALG_NONE;
475 }
476