// SPDX-License-Identifier: GPL-2.0
/*  OpenVPN data channel offload
 *
 *  Copyright (C) 2019-2025 OpenVPN, Inc.
 *
 *  Author:	James Yonan <james@openvpn.net>
 *		Antonio Quartulli <antonio@openvpn.net>
 */

#include <crypto/aead.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro_cells.h>
#include <net/gso.h>
#include <net/ip.h>

#include "ovpnpriv.h"
#include "peer.h"
#include "io.h"
#include "bind.h"
#include "crypto.h"
#include "crypto_aead.h"
#include "netlink.h"
#include "proto.h"
#include "tcp.h"
#include "udp.h"
#include "skb.h"
#include "socket.h"

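/* fixed 16-byte payload used by the OpenVPN protocol as a keepalive (ping)
 * message on the data channel
 */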
const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
	0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
	0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
};

/**
 * ovpn_is_keepalive - check if skb contains a keepalive message
 * @skb: packet to check
 *
 * Assumes that the first byte of skb->data is defined.
 *
 * Return: true if skb contains a keepalive or false otherwise
 */
static bool ovpn_is_keepalive(struct sk_buff *skb)
{
	if (*skb->data != ovpn_keepalive_message[0])
		return false;

	if (skb->len != OVPN_KEEPALIVE_SIZE)
		return false;

	if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
		return false;

	return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
}

/* Called after decrypt to write the IP packet to the device.
 * This method is expected to manage/free the skb.
 */
static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
{
	unsigned int pkt_len;
	int ret;

	/*
	 * GSO state from the transport layer is not valid for the tunnel/data
	 * path. Reset all GSO fields to prevent any further GSO processing
	 * from entering an inconsistent state.
	 */
	skb_gso_reset(skb);

	/* we can't guarantee the packet wasn't corrupted before entering the
	 * VPN, therefore we give other layers a chance to check that
	 */
	skb->ip_summed = CHECKSUM_NONE;

	/* skb hash for transport packet no longer valid after decapsulation */
	skb_clear_hash(skb);

	/* post-decrypt scrub -- prepare to inject encapsulated packet onto the
	 * interface, based on __skb_tunnel_rx() in dst.h
	 */
	skb->dev = peer->ovpn->dev;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, true);

	/* network header reset in ovpn_decrypt_post() */
	skb_reset_transport_header(skb);
	skb_reset_inner_headers(skb);

	/* cause packet to be "received" by the interface */
	pkt_len = skb->len;
	ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
	if (likely(ret == NET_RX_SUCCESS)) {
		/* update RX stats with the size of decrypted packet */
		ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
		dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
	}
}

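/* Decryption completion handler: called by ovpn_recv() for synchronous
 * ciphers and again by the crypto callback once an asynchronous request
 * has finished. Verifies the packet ID, refreshes keepalive and endpoint
 * state, then delivers the inner packet to the device, dropping it (and
 * updating drop stats) on any error.
 */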
void ovpn_decrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	unsigned int payload_offset = 0;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	__be16 proto;
	__be32 *pid;

	/* crypto is happening asynchronously. this function will be called
	 * again later by the crypto callback with a proper return code
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	payload_offset = ovpn_skb_cb(skb)->payload_offset;
	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->crypto_tmp);

	if (unlikely(ret < 0))
		goto drop;

	/* PID sits after the op */
	pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
	ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
	if (unlikely(ret < 0)) {
		net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
				    netdev_name(peer->ovpn->dev), peer->id,
				    ret);
		goto drop;
	}

	/* keep track of last received authenticated packet for keepalive */
	WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
		/* check if this peer changed local or remote endpoint */
		ovpn_peer_endpoints_update(peer, skb);
	rcu_read_unlock();

	/* point to encapsulated IP packet */
	__skb_pull(skb, payload_offset);

	/* check if this is a valid data packet that has to be delivered to
	 * the ovpn interface
	 */
	skb_reset_network_header(skb);
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto)) {
		/* check if null packet */
		if (unlikely(!pskb_may_pull(skb, 1))) {
			net_info_ratelimited("%s: NULL packet received from peer %u\n",
					     netdev_name(peer->ovpn->dev),
					     peer->id);
			goto drop;
		}

		if (ovpn_is_keepalive(skb)) {
			net_dbg_ratelimited("%s: ping received from peer %u\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id);
			/* we drop the packet, but this is not a failure */
			consume_skb(skb);
			goto drop_nocount;
		}

		net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
				     netdev_name(peer->ovpn->dev), peer->id);
		goto drop;
	}
	skb->protocol = proto;

	/* perform Reverse Path Filtering (RPF) */
	if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
		if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ipv6_hdr(skb)->saddr);
		else
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ip_hdr(skb)->saddr);
		goto drop;
	}

	ovpn_netdev_write(peer, skb);
	/* skb is passed to upper layer - don't free it */
	skb = NULL;
drop:
	if (unlikely(skb))
		dev_dstats_rx_dropped(peer->ovpn->dev);
	kfree_skb(skb);
drop_nocount:
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
}

/* RX path entry point: decrypt packet and forward it to the device */
void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct ovpn_crypto_key_slot *ks;
	u8 key_id;

	ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);

	/* get the key slot matching the key ID in the received packet */
	key_id = ovpn_key_id_from_skb(skb);
	ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
	if (unlikely(!ks)) {
		net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
				     netdev_name(peer->ovpn->dev), peer->id,
				     key_id);
		dev_dstats_rx_dropped(peer->ovpn->dev);
		kfree_skb(skb);
		ovpn_peer_put(peer);
		return;
	}

	memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
	ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
}

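/* Encryption completion handler: called by ovpn_encrypt_one() for
 * synchronous ciphers and again by the crypto callback once an
 * asynchronous request has finished. Hands the encrypted packet to the
 * peer's UDP or TCP transport, updates TX stats and the keepalive
 * timestamp, and drops the packet on any error (killing the key when it
 * has run out of IVs).
 */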
void ovpn_encrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	unsigned int orig_len;

	/* encryption is happening asynchronously. This function will be
	 * called later by the crypto callback with a proper return value
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->crypto_tmp);

	if (unlikely(ret == -ERANGE)) {
		/* we ran out of IVs and we must kill the key as it can't be
		 * used anymore
		 */
		netdev_warn(peer->ovpn->dev,
			    "killing key %u for peer %u\n", ks->key_id,
			    peer->id);
		if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
			/* let userspace know so that a new key can be negotiated */
			ovpn_nl_key_swap_notify(peer, ks->key_id);

		goto err;
	}

	if (unlikely(ret < 0))
		goto err;

	skb_mark_not_on_list(skb);
	orig_len = skb->len;

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (unlikely(!sock))
		goto err_unlock;

	switch (sock->sk->sk_protocol) {
	case IPPROTO_UDP:
		ovpn_udp_send_skb(peer, sock->sk, skb);
		break;
	case IPPROTO_TCP:
		ovpn_tcp_send_skb(peer, sock->sk, skb);
		break;
	default:
		/* no transport configured yet */
		goto err_unlock;
	}

	ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
	/* keep track of last sent packet for keepalive */
	WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
	/* skb passed down the stack - don't free it */
	skb = NULL;
err_unlock:
	rcu_read_unlock();
err:
	if (unlikely(skb))
		dev_dstats_tx_dropped(peer->ovpn->dev);
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
	kfree_skb(skb);
}

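/* Encrypt a single skb with the peer's primary key slot.
 *
 * Returns false if no valid key is available or the peer reference cannot
 * be taken; in that case ownership of the skb stays with the caller.
 * On success, ovpn_encrypt_post() takes care of transmitting or freeing
 * the skb and of releasing the acquired references.
 */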
static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct ovpn_crypto_key_slot *ks;

	/* get primary key to be used for encrypting data */
	ks = ovpn_crypto_key_slot_primary(&peer->crypto);
	if (unlikely(!ks))
		return false;

	/* take a reference to the peer because the crypto code may run async.
	 * ovpn_encrypt_post() will release it upon completion
	 */
	if (unlikely(!ovpn_peer_hold(peer))) {
		DEBUG_NET_WARN_ON_ONCE(1);
		ovpn_crypto_key_slot_put(ks);
		return false;
	}

	memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
	ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
	return true;
}

/* Encrypt the skb (or each skb in a GSO-segmented list) and hand it to the
 * transport towards the given peer. Consumes the caller's peer reference.
 */
static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
		      struct ovpn_peer *peer)
{
	struct sk_buff *curr, *next;

	/* this might be a GSO-segmented skb list: process each skb
	 * independently
	 */
	skb_list_walk_safe(skb, curr, next) {
		if (unlikely(!ovpn_encrypt_one(peer, curr))) {
			dev_dstats_tx_dropped(ovpn->dev);
			kfree_skb(curr);
		}
	}

	ovpn_peer_put(peer);
}

/* Send user data to the network (netdev start_xmit handler)
 */
netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ovpn_priv *ovpn = netdev_priv(dev);
	struct sk_buff *segments, *curr, *next;
	struct sk_buff_head skb_list;
	unsigned int tx_bytes = 0;
	struct ovpn_peer *peer;
	__be16 proto;
	int ret;

	/* reset netfilter state */
	nf_reset_ct(skb);

	/* verify IP header size in network packet */
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto || skb->protocol != proto))
		goto drop_no_peer;

	/* retrieve peer serving the destination IP of this packet */
	peer = ovpn_peer_get_by_dst(ovpn, skb);
	if (unlikely(!peer)) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
					    netdev_name(ovpn->dev),
					    &ip_hdr(skb)->daddr);
			break;
		case htons(ETH_P_IPV6):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI6c\n",
					    netdev_name(ovpn->dev),
					    &ipv6_hdr(skb)->daddr);
			break;
		}
		goto drop_no_peer;
	}
	/* dst was needed for peer selection - it can now be dropped */
	skb_dst_drop(skb);

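	/* GSO skbs must be segmented in software here, since each resulting
	 * packet is encapsulated and encrypted individually
	 */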
	if (skb_is_gso(skb)) {
		segments = skb_gso_segment(skb, 0);
		if (IS_ERR(segments)) {
			ret = PTR_ERR(segments);
			net_err_ratelimited("%s: cannot segment payload packet: %d\n",
					    netdev_name(dev), ret);
			goto drop;
		}

		consume_skb(skb);
		skb = segments;
	}

	/* from this moment on, "skb" might be a list */

	__skb_queue_head_init(&skb_list);
	skb_list_walk_safe(skb, curr, next) {
		skb_mark_not_on_list(curr);

		curr = skb_share_check(curr, GFP_ATOMIC);
		if (unlikely(!curr)) {
			net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
					    netdev_name(dev));
			dev_dstats_tx_dropped(ovpn->dev);
			continue;
		}

		/* only count what we actually send */
		tx_bytes += curr->len;
		__skb_queue_tail(&skb_list, curr);
	}

	/* no segments survived: don't jump to 'drop' because we already
	 * incremented the counter for each failure in the loop
	 */
	if (unlikely(skb_queue_empty(&skb_list))) {
		ovpn_peer_put(peer);
		return NETDEV_TX_OK;
	}
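	/* detach the queue head so the collected skbs form a NULL-terminated
	 * list that ovpn_send() can walk via skb->next
	 */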
	skb_list.prev->next = NULL;

	ovpn_peer_stats_increment_tx(&peer->vpn_stats, tx_bytes);
	ovpn_send(ovpn, skb_list.next, peer);

	return NETDEV_TX_OK;

drop:
	ovpn_peer_put(peer);
drop_no_peer:
	dev_dstats_tx_dropped(ovpn->dev);
	skb_tx_error(skb);
	kfree_skb_list(skb);
	return NETDEV_TX_OK;
}

/**
 * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
 * @peer: peer to send the message to
 * @data: message content
 * @len: message length
 *
 * Assumes that caller holds a reference to peer, which will be
 * passed to ovpn_send()
 */
void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
		       const unsigned int len)
{
	struct ovpn_priv *ovpn;
	struct sk_buff *skb;

	ovpn = peer->ovpn;
	if (unlikely(!ovpn)) {
		ovpn_peer_put(peer);
		return;
	}

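	/* over-allocate and reserve headroom for the headers that encryption
	 * and encapsulation will prepend later on
	 */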
	skb = alloc_skb(256 + len, GFP_ATOMIC);
	if (unlikely(!skb)) {
		ovpn_peer_put(peer);
		return;
	}

	skb_reserve(skb, 128);
	skb->priority = TC_PRIO_BESTEFFORT;
	__skb_put_data(skb, data, len);

	ovpn_send(ovpn, skb, peer);
}