xref: /linux/drivers/net/ovpn/crypto_aead.c (revision 3186a8e55ae3428ec1e06af09075e20885376e4e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*  OpenVPN data channel offload
3  *
4  *  Copyright (C) 2020-2025 OpenVPN, Inc.
5  *
6  *  Author:	James Yonan <james@openvpn.net>
7  *		Antonio Quartulli <antonio@openvpn.net>
8  */
9 
10 #include <crypto/aead.h>
11 #include <linux/skbuff.h>
12 #include <net/ip.h>
13 #include <net/ipv6.h>
14 #include <net/udp.h>
15 
16 #include "ovpnpriv.h"
17 #include "main.h"
18 #include "io.h"
19 #include "pktid.h"
20 #include "crypto_aead.h"
21 #include "crypto.h"
22 #include "peer.h"
23 #include "proto.h"
24 #include "skb.h"
25 
26 #define OVPN_AUTH_TAG_SIZE	16
27 #define OVPN_AAD_SIZE		(OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE)
28 
29 #define ALG_NAME_AES		"gcm(aes)"
30 #define ALG_NAME_CHACHAPOLY	"rfc7539(chacha20,poly1305)"
31 
32 static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
33 {
34 	return  OVPN_OPCODE_SIZE +			/* OP header size */
35 		sizeof(u32) +				/* Packet ID */
36 		crypto_aead_authsize(ks->encrypt);	/* Auth Tag */
37 }
38 
/**
 * ovpn_aead_encrypt - encrypt and encapsulate an outgoing data packet
 * @peer: the peer the packet is directed to
 * @ks: the key slot providing the AEAD tfm, packet-ID state and nonce tail
 * @skb: packet whose payload starts at skb->data; header and tag space are
 *	 pushed in front of it here
 *
 * Builds the data-channel wire format (32-bit opcode word + 4-byte packet
 * ID + auth tag + payload), maps it into a scatterlist and starts the AEAD
 * operation. The scatterlist, IV and aead_request are kmalloc'ed and
 * stashed in the skb control block because an async tfm may still need
 * them after this function returns; ovpn_encrypt_post() is set as the
 * completion callback.
 *
 * NOTE(review): on the early error returns below, buffers already attached
 * to the skb cb (sg, iv) are presumably released by the caller's
 * completion path together with the skb — confirm in io.c.
 *
 * Return: the result of crypto_aead_encrypt() (0 on synchronous success,
 * -EINPROGRESS/-EBUSY when completing asynchronously) or a negative errno
 * on setup failure.
 */
int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
		      struct sk_buff *skb)
{
	const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags, ret;
	u32 pktid, op;
	u8 *iv;

	/* stash peer/key-slot so the async completion path can find them */
	ovpn_skb_cb(skb)->peer = peer;
	ovpn_skb_cb(skb)->ks = ks;

	/* Sample AEAD header format:
	 * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
	 * [ OP32 ] [seq # ] [             auth tag            ] [ payload ... ]
	 *          [4-byte
	 *          IV head]
	 */

	/* check that there's enough headroom in the skb for packet
	 * encapsulation
	 */
	if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM)))
		return -ENOBUFS;

	/* get number of skb frags and ensure that packet data is writable */
	nfrags = skb_cow_data(skb, 0, &trailer);
	if (unlikely(nfrags < 0))
		return nfrags;

	/* +2 accounts for the AD slot in front and the tag slot behind the
	 * payload entries in the sg table below
	 */
	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
		return -ENOSPC;

	/* sg may be required by async crypto */
	ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
				       (nfrags + 2), GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->sg))
		return -ENOMEM;

	sg = ovpn_skb_cb(skb)->sg;

	/* sg table:
	 * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE),
	 * 1, 2, 3, ..., n: payload,
	 * n+1: auth_tag (len=tag_size)
	 */
	sg_init_table(sg, nfrags + 2);

	/* build scatterlist to encrypt packet payload */
	ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
	if (unlikely(nfrags != ret))
		return -EINVAL;

	/* append auth_tag onto scatterlist */
	__skb_push(skb, tag_size);
	sg_set_buf(sg + nfrags + 1, skb->data, tag_size);

	/* obtain packet ID, which is used both as a first
	 * 4 bytes of nonce and last 4 bytes of associated data.
	 */
	ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
	if (unlikely(ret < 0))
		return ret;

	/* iv may be required by async crypto */
	ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->iv))
		return -ENOMEM;

	iv = ovpn_skb_cb(skb)->iv;

	/* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
	 * nonce
	 */
	ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);

	/* make space for packet id and push it to the front */
	__skb_push(skb, OVPN_NONCE_WIRE_SIZE);
	memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);

	/* add packet op as head of additional data */
	op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id);
	__skb_push(skb, OVPN_OPCODE_SIZE);
	BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
	*((__force __be32 *)skb->data) = htonl(op);

	/* AEAD Additional data */
	sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);

	req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
	if (unlikely(!req))
		return -ENOMEM;

	ovpn_skb_cb(skb)->req = req;

	/* setup async crypto operation */
	aead_request_set_tfm(req, ks->encrypt);
	aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
	aead_request_set_crypt(req, sg, sg,
			       skb->len - ovpn_aead_encap_overhead(ks), iv);
	aead_request_set_ad(req, OVPN_AAD_SIZE);

	/* encrypt it */
	return crypto_aead_encrypt(req);
}
146 
/**
 * ovpn_aead_decrypt - authenticate and decrypt an incoming data packet
 * @peer: the peer the packet was received from
 * @ks: the key slot providing the AEAD tfm and the receive nonce tail
 * @skb: packet with the full wire header (opcode + wire nonce + tag) still
 *	 at skb->data
 *
 * Maps the AD (opcode + wire nonce), the encrypted payload and the auth
 * tag into a scatterlist, reconstructs the 12-byte nonce from the wire
 * nonce and ks->nonce_tail_recv, and starts the AEAD operation. The
 * scatterlist, IV and aead_request are stashed in the skb control block
 * for the benefit of async tfms; ovpn_decrypt_post() is set as the
 * completion callback.
 *
 * NOTE(review): replay/packet-ID validation against ks->pid_recv is not
 * visible here — presumably performed in ovpn_decrypt_post(); confirm.
 *
 * Return: the result of crypto_aead_decrypt() (0 on synchronous success,
 * -EINPROGRESS/-EBUSY when completing asynchronously, -EBADMSG on
 * authentication failure) or a negative errno on setup failure.
 */
int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
		      struct sk_buff *skb)
{
	const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
	int ret, payload_len, nfrags;
	unsigned int payload_offset;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	u8 *iv;

	/* ciphertext payload starts after AD (opcode + wire nonce) and tag */
	payload_offset = OVPN_AAD_SIZE + tag_size;
	payload_len = skb->len - payload_offset;

	/* stash offsets/pointers so the async completion path can use them */
	ovpn_skb_cb(skb)->payload_offset = payload_offset;
	ovpn_skb_cb(skb)->peer = peer;
	ovpn_skb_cb(skb)->ks = ks;

	/* sanity check on packet size, payload size must be >= 0 */
	if (unlikely(payload_len < 0))
		return -EINVAL;

	/* Prepare the skb data buffer to be accessed up until the auth tag.
	 * This is required because this area is directly mapped into the sg
	 * list.
	 */
	if (unlikely(!pskb_may_pull(skb, payload_offset)))
		return -ENODATA;

	/* get number of skb frags and ensure that packet data is writable */
	nfrags = skb_cow_data(skb, 0, &trailer);
	if (unlikely(nfrags < 0))
		return nfrags;

	/* +2 accounts for the AD slot in front and the tag slot behind the
	 * payload entries in the sg table below
	 */
	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
		return -ENOSPC;

	/* sg may be required by async crypto */
	ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
				       (nfrags + 2), GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->sg))
		return -ENOMEM;

	sg = ovpn_skb_cb(skb)->sg;

	/* sg table:
	 * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
	 * 1, 2, 3, ..., n: payload,
	 * n+1: auth_tag (len=tag_size)
	 */
	sg_init_table(sg, nfrags + 2);

	/* packet op is head of additional data */
	sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);

	/* build scatterlist to decrypt packet payload */
	ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
	if (unlikely(nfrags != ret))
		return -EINVAL;

	/* append auth_tag onto scatterlist */
	sg_set_buf(sg + nfrags + 1, skb->data + OVPN_AAD_SIZE, tag_size);

	/* iv may be required by async crypto */
	ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->iv))
		return -ENOMEM;

	iv = ovpn_skb_cb(skb)->iv;

	/* copy nonce into IV buffer */
	memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
	memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
	       OVPN_NONCE_TAIL_SIZE);

	req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
	if (unlikely(!req))
		return -ENOMEM;

	ovpn_skb_cb(skb)->req = req;

	/* setup async crypto operation */
	aead_request_set_tfm(req, ks->decrypt);
	aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
	aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);

	aead_request_set_ad(req, OVPN_AAD_SIZE);

	/* decrypt it */
	return crypto_aead_decrypt(req);
}
238 
239 /* Initialize a struct crypto_aead object */
240 static struct crypto_aead *ovpn_aead_init(const char *title,
241 					  const char *alg_name,
242 					  const unsigned char *key,
243 					  unsigned int keylen)
244 {
245 	struct crypto_aead *aead;
246 	int ret;
247 
248 	aead = crypto_alloc_aead(alg_name, 0, 0);
249 	if (IS_ERR(aead)) {
250 		ret = PTR_ERR(aead);
251 		pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
252 		aead = NULL;
253 		goto error;
254 	}
255 
256 	ret = crypto_aead_setkey(aead, key, keylen);
257 	if (ret) {
258 		pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
259 		       keylen, ret);
260 		goto error;
261 	}
262 
263 	ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
264 	if (ret) {
265 		pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
266 		       ret);
267 		goto error;
268 	}
269 
270 	/* basic AEAD assumption */
271 	if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
272 		pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
273 		ret = -EINVAL;
274 		goto error;
275 	}
276 
277 	pr_debug("********* Cipher %s (%s)\n", alg_name, title);
278 	pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
279 	pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
280 	pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
281 	pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
282 	pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));
283 
284 	return aead;
285 
286 error:
287 	crypto_free_aead(aead);
288 	return ERR_PTR(ret);
289 }
290 
/* Release both AEAD transforms held by @ks and free the slot itself.
 * NULL-safe so it can be called on partially initialized slots (the
 * tfm pointers are expected to be either valid or NULL, never ERR_PTR).
 */
void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
{
	if (!ks)
		return;

	crypto_free_aead(ks->encrypt);
	crypto_free_aead(ks->decrypt);
	kfree(ks);
}
300 
301 struct ovpn_crypto_key_slot *
302 ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
303 {
304 	struct ovpn_crypto_key_slot *ks = NULL;
305 	const char *alg_name;
306 	int ret;
307 
308 	/* validate crypto alg */
309 	switch (kc->cipher_alg) {
310 	case OVPN_CIPHER_ALG_AES_GCM:
311 		alg_name = ALG_NAME_AES;
312 		break;
313 	case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
314 		alg_name = ALG_NAME_CHACHAPOLY;
315 		break;
316 	default:
317 		return ERR_PTR(-EOPNOTSUPP);
318 	}
319 
320 	if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE ||
321 	    kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE)
322 		return ERR_PTR(-EINVAL);
323 
324 	/* build the key slot */
325 	ks = kmalloc(sizeof(*ks), GFP_KERNEL);
326 	if (!ks)
327 		return ERR_PTR(-ENOMEM);
328 
329 	ks->encrypt = NULL;
330 	ks->decrypt = NULL;
331 	kref_init(&ks->refcount);
332 	ks->key_id = kc->key_id;
333 
334 	ks->encrypt = ovpn_aead_init("encrypt", alg_name,
335 				     kc->encrypt.cipher_key,
336 				     kc->encrypt.cipher_key_size);
337 	if (IS_ERR(ks->encrypt)) {
338 		ret = PTR_ERR(ks->encrypt);
339 		ks->encrypt = NULL;
340 		goto destroy_ks;
341 	}
342 
343 	ks->decrypt = ovpn_aead_init("decrypt", alg_name,
344 				     kc->decrypt.cipher_key,
345 				     kc->decrypt.cipher_key_size);
346 	if (IS_ERR(ks->decrypt)) {
347 		ret = PTR_ERR(ks->decrypt);
348 		ks->decrypt = NULL;
349 		goto destroy_ks;
350 	}
351 
352 	memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail,
353 	       OVPN_NONCE_TAIL_SIZE);
354 	memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail,
355 	       OVPN_NONCE_TAIL_SIZE);
356 
357 	/* init packet ID generation/validation */
358 	ovpn_pktid_xmit_init(&ks->pid_xmit);
359 	ovpn_pktid_recv_init(&ks->pid_recv);
360 
361 	return ks;
362 
363 destroy_ks:
364 	ovpn_aead_crypto_key_slot_destroy(ks);
365 	return ERR_PTR(ret);
366 }
367 
368 enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks)
369 {
370 	const char *alg_name;
371 
372 	if (!ks->encrypt)
373 		return OVPN_CIPHER_ALG_NONE;
374 
375 	alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt));
376 
377 	if (!strcmp(alg_name, ALG_NAME_AES))
378 		return OVPN_CIPHER_ALG_AES_GCM;
379 	else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY))
380 		return OVPN_CIPHER_ALG_CHACHA20_POLY1305;
381 	else
382 		return OVPN_CIPHER_ALG_NONE;
383 }
384