// SPDX-License-Identifier: GPL-2.0
/* OpenVPN data channel offload
 *
 * Copyright (C) 2019-2025 OpenVPN, Inc.
 *
 * Author:	James Yonan <james@openvpn.net>
 *		Antonio Quartulli <antonio@openvpn.net>
 */
9
10 #include <crypto/aead.h>
11 #include <linux/netdevice.h>
12 #include <linux/skbuff.h>
13 #include <net/gro_cells.h>
14 #include <net/gso.h>
15 #include <net/ip.h>
16
17 #include "ovpnpriv.h"
18 #include "peer.h"
19 #include "io.h"
20 #include "bind.h"
21 #include "crypto.h"
22 #include "crypto_aead.h"
23 #include "netlink.h"
24 #include "proto.h"
25 #include "tcp.h"
26 #include "udp.h"
27 #include "skb.h"
28 #include "socket.h"
29
/* Fixed 16-byte magic payload carried by OpenVPN keepalive ("ping")
 * messages; decrypted packets are compared against it in
 * ovpn_is_keepalive().
 */
const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
	0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
	0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
};
34
35 /**
36 * ovpn_is_keepalive - check if skb contains a keepalive message
37 * @skb: packet to check
38 *
39 * Assumes that the first byte of skb->data is defined.
40 *
41 * Return: true if skb contains a keepalive or false otherwise
42 */
ovpn_is_keepalive(struct sk_buff * skb)43 static bool ovpn_is_keepalive(struct sk_buff *skb)
44 {
45 if (*skb->data != ovpn_keepalive_message[0])
46 return false;
47
48 if (skb->len != OVPN_KEEPALIVE_SIZE)
49 return false;
50
51 if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
52 return false;
53
54 return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
55 }
56
/* Called after decrypt to write the IP packet to the device.
 * This method is expected to manage/free the skb.
 */
static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
{
	unsigned int pkt_len;
	int ret;

	/*
	 * GSO state from the transport layer is not valid for the tunnel/data
	 * path. Reset all GSO fields to prevent any further GSO processing
	 * from entering an inconsistent state.
	 */
	skb_gso_reset(skb);

	/* we can't guarantee the packet wasn't corrupted before entering the
	 * VPN, therefore we give other layers a chance to check that
	 */
	skb->ip_summed = CHECKSUM_NONE;

	/* skb hash for transport packet no longer valid after decapsulation */
	skb_clear_hash(skb);

	/* post-decrypt scrub -- prepare to inject encapsulated packet onto the
	 * interface, based on __skb_tunnel_rx() in dst.h
	 */
	skb->dev = peer->ovpn->dev;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, true);

	/* network header reset in ovpn_decrypt_post() */
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_inner_headers(skb);

	/* snapshot the length now: the skb is handed off below and must not
	 * be touched afterwards
	 */
	pkt_len = skb->len;
	/* we may get here in process context in case of TCP connections,
	 * therefore we have to disable BHs to ensure gro_cells_receive()
	 * and dev_dstats_rx_add() do not get corrupted or enter deadlock
	 */
	local_bh_disable();
	/* cause packet to be "received" by the interface */
	ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
	if (likely(ret == NET_RX_SUCCESS)) {
		/* update RX stats with the size of decrypted packet */
		ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
		dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
	}
	local_bh_enable();
}
107
/**
 * ovpn_decrypt_post - handle a decrypted packet (crypto completion path)
 * @data: the sk_buff that went through decryption (void * to match the
 *	crypto completion callback signature)
 * @ret: decryption result, or -EINPROGRESS while async crypto is running
 *
 * Runs the packet-ID (replay) check, tracks the peer endpoint, detects
 * keepalive messages, performs reverse path filtering and finally hands
 * the inner IP packet to the device via ovpn_netdev_write().
 *
 * Consumes the skb and releases the peer and key-slot references stored
 * in the skb control block.
 */
void ovpn_decrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	unsigned int payload_offset = 0;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	__be16 proto;
	__be32 *pid;

	/* crypto is happening asynchronously. this function will be called
	 * again later by the crypto callback with a proper return code
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	payload_offset = ovpn_skb_cb(skb)->payload_offset;
	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->crypto_tmp);

	if (unlikely(ret < 0))
		goto drop;

	/* PID sits after the op */
	pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
	ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
	if (unlikely(ret < 0)) {
		net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
				    netdev_name(peer->ovpn->dev), peer->id,
				    ret);
		goto drop;
	}

	/* keep track of last received authenticated packet for keepalive */
	WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
		/* check if this peer changed local or remote endpoint */
		ovpn_peer_endpoints_update(peer, skb);
	rcu_read_unlock();

	/* point to encapsulated IP packet */
	__skb_pull(skb, payload_offset);

	/* check if this is a valid datapacket that has to be delivered to the
	 * ovpn interface
	 */
	skb_reset_network_header(skb);
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto)) {
		/* check if null packet */
		if (unlikely(!pskb_may_pull(skb, 1))) {
			net_info_ratelimited("%s: NULL packet received from peer %u\n",
					     netdev_name(peer->ovpn->dev),
					     peer->id);
			goto drop;
		}

		if (ovpn_is_keepalive(skb)) {
			net_dbg_ratelimited("%s: ping received from peer %u\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id);
			/* we drop the packet, but this is not a failure */
			consume_skb(skb);
			goto drop_nocount;
		}

		net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
				     netdev_name(peer->ovpn->dev), peer->id);
		goto drop;
	}
	skb->protocol = proto;

	/* perform Reverse Path Filtering (RPF) */
	if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
		if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ipv6_hdr(skb)->saddr);
		else
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ip_hdr(skb)->saddr);
		goto drop;
	}

	ovpn_netdev_write(peer, skb);
	/* skb is passed to upper layer - don't free it */
	skb = NULL;
drop:
	/* error path: account the drop and free the skb (kfree_skb(NULL)
	 * is a no-op on the success path)
	 */
	if (unlikely(skb))
		dev_dstats_rx_dropped(peer->ovpn->dev);
	kfree_skb(skb);
drop_nocount:
	/* release the references stored in the skb CB by ovpn_recv() */
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
}
212
213 /* RX path entry point: decrypt packet and forward it to the device */
ovpn_recv(struct ovpn_peer * peer,struct sk_buff * skb)214 void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
215 {
216 struct ovpn_crypto_key_slot *ks;
217 u8 key_id;
218
219 ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);
220
221 /* get the key slot matching the key ID in the received packet */
222 key_id = ovpn_key_id_from_skb(skb);
223 ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
224 if (unlikely(!ks)) {
225 net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
226 netdev_name(peer->ovpn->dev), peer->id,
227 key_id);
228 dev_dstats_rx_dropped(peer->ovpn->dev);
229 kfree_skb(skb);
230 ovpn_peer_put(peer);
231 return;
232 }
233
234 memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
235 ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
236 }
237
/**
 * ovpn_encrypt_post - handle an encrypted packet (crypto completion path)
 * @data: the sk_buff that went through encryption (void * to match the
 *	crypto completion callback signature)
 * @ret: encryption result, or -EINPROGRESS while async crypto is running
 *
 * Hands the encrypted packet to the peer's UDP or TCP transport, updates
 * TX stats and the keepalive timestamp, and releases the peer and key-slot
 * references stored in the skb control block. The skb is freed on any
 * error path.
 */
void ovpn_encrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	unsigned int orig_len;

	/* encryption is happening asynchronously. This function will be
	 * called later by the crypto callback with a proper return value
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->crypto_tmp);

	if (unlikely(ret == -ERANGE)) {
		/* we ran out of IVs and we must kill the key as it can't be
		 * used anymore
		 */
		netdev_warn(peer->ovpn->dev,
			    "killing key %u for peer %u\n", ks->key_id,
			    peer->id);
		if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
			/* let userspace know so that a new key must be negotiated */
			ovpn_nl_key_swap_notify(peer, ks->key_id);

		goto err;
	}

	if (unlikely(ret < 0))
		goto err;

	skb_mark_not_on_list(skb);
	/* record the length before the transport consumes the skb */
	orig_len = skb->len;

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (unlikely(!sock))
		goto err_unlock;

	switch (sock->sk->sk_protocol) {
	case IPPROTO_UDP:
		ovpn_udp_send_skb(peer, sock->sk, skb);
		break;
	case IPPROTO_TCP:
		ovpn_tcp_send_skb(peer, sock->sk, skb);
		break;
	default:
		/* no transport configured yet */
		goto err_unlock;
	}

	ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
	/* keep track of last sent packet for keepalive */
	WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
	/* skb passed down the stack - don't free it */
	skb = NULL;
err_unlock:
	rcu_read_unlock();
err:
	/* on failure skb is still non-NULL: account and free it below */
	if (unlikely(skb))
		dev_dstats_tx_dropped(peer->ovpn->dev);
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
	kfree_skb(skb);
}
311
ovpn_encrypt_one(struct ovpn_peer * peer,struct sk_buff * skb)312 static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
313 {
314 struct ovpn_crypto_key_slot *ks;
315
316 /* get primary key to be used for encrypting data */
317 ks = ovpn_crypto_key_slot_primary(&peer->crypto);
318 if (unlikely(!ks))
319 return false;
320
321 /* take a reference to the peer because the crypto code may run async.
322 * ovpn_encrypt_post() will release it upon completion
323 */
324 if (unlikely(!ovpn_peer_hold(peer))) {
325 DEBUG_NET_WARN_ON_ONCE(1);
326 ovpn_crypto_key_slot_put(ks);
327 return false;
328 }
329
330 memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
331 ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
332 return true;
333 }
334
335 /* send skb to connected peer, if any */
ovpn_send(struct ovpn_priv * ovpn,struct sk_buff * skb,struct ovpn_peer * peer)336 static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
337 struct ovpn_peer *peer)
338 {
339 struct sk_buff *curr, *next;
340
341 /* this might be a GSO-segmented skb list: process each skb
342 * independently
343 */
344 skb_list_walk_safe(skb, curr, next) {
345 if (unlikely(!ovpn_encrypt_one(peer, curr))) {
346 dev_dstats_tx_dropped(ovpn->dev);
347 kfree_skb(curr);
348 }
349 }
350
351 ovpn_peer_put(peer);
352 }
353
/* Send user data to the network (ndo_start_xmit handler).
 *
 * Validates the IP protocol, looks up the peer serving the destination,
 * segments GSO packets, unshares each segment and hands the resulting
 * list to ovpn_send() for encryption. Always returns NETDEV_TX_OK;
 * failures are accounted via dev_dstats_tx_dropped().
 */
netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ovpn_priv *ovpn = netdev_priv(dev);
	struct sk_buff *segments, *curr, *next;
	struct sk_buff_head skb_list;
	unsigned int tx_bytes = 0;
	struct ovpn_peer *peer;
	__be16 proto;
	int ret;

	/* reset netfilter state */
	nf_reset_ct(skb);

	/* verify IP header size in network packet */
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto || skb->protocol != proto))
		goto drop_no_peer;

	/* retrieve peer serving the destination IP of this packet */
	peer = ovpn_peer_get_by_dst(ovpn, skb);
	if (unlikely(!peer)) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
					    netdev_name(ovpn->dev),
					    &ip_hdr(skb)->daddr);
			break;
		case htons(ETH_P_IPV6):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI6c\n",
					    netdev_name(ovpn->dev),
					    &ipv6_hdr(skb)->daddr);
			break;
		}
		goto drop_no_peer;
	}
	/* dst was needed for peer selection - it can now be dropped */
	skb_dst_drop(skb);

	if (skb_is_gso(skb)) {
		segments = skb_gso_segment(skb, 0);
		if (IS_ERR(segments)) {
			ret = PTR_ERR(segments);
			net_err_ratelimited("%s: cannot segment payload packet: %d\n",
					    netdev_name(dev), ret);
			goto drop;
		}

		/* the original skb was replaced by its segments */
		consume_skb(skb);
		skb = segments;
	}

	/* from this moment on, "skb" might be a list */

	__skb_queue_head_init(&skb_list);
	skb_list_walk_safe(skb, curr, next) {
		skb_mark_not_on_list(curr);

		/* get a private copy if the skb is shared, so that it can
		 * be modified further down the TX path
		 */
		curr = skb_share_check(curr, GFP_ATOMIC);
		if (unlikely(!curr)) {
			net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
					    netdev_name(dev));
			dev_dstats_tx_dropped(ovpn->dev);
			continue;
		}

		/* only count what we actually send */
		tx_bytes += curr->len;
		__skb_queue_tail(&skb_list, curr);
	}

	/* no segments survived: don't jump to 'drop' because we already
	 * incremented the counter for each failure in the loop
	 */
	if (unlikely(skb_queue_empty(&skb_list))) {
		ovpn_peer_put(peer);
		return NETDEV_TX_OK;
	}
	/* break the circular queue link so the list below is
	 * NULL-terminated for skb_list_walk_safe() in ovpn_send()
	 */
	skb_list.prev->next = NULL;

	ovpn_peer_stats_increment_tx(&peer->vpn_stats, tx_bytes);
	ovpn_send(ovpn, skb_list.next, peer);

	return NETDEV_TX_OK;

drop:
	ovpn_peer_put(peer);
drop_no_peer:
	dev_dstats_tx_dropped(ovpn->dev);
	skb_tx_error(skb);
	kfree_skb_list(skb);
	return NETDEV_TX_OK;
}
448
449 /**
450 * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
451 * @peer: peer to send the message to
452 * @data: message content
453 * @len: message length
454 *
455 * Assumes that caller holds a reference to peer, which will be
456 * passed to ovpn_send()
457 */
ovpn_xmit_special(struct ovpn_peer * peer,const void * data,const unsigned int len)458 void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
459 const unsigned int len)
460 {
461 struct ovpn_priv *ovpn;
462 struct sk_buff *skb;
463
464 ovpn = peer->ovpn;
465 if (unlikely(!ovpn)) {
466 ovpn_peer_put(peer);
467 return;
468 }
469
470 skb = alloc_skb(256 + len, GFP_ATOMIC);
471 if (unlikely(!skb)) {
472 ovpn_peer_put(peer);
473 return;
474 }
475
476 skb_reserve(skb, 128);
477 skb->priority = TC_PRIO_BESTEFFORT;
478 __skb_put_data(skb, data, len);
479
480 ovpn_send(ovpn, skb, peer);
481 }
482