xref: /linux/drivers/net/ovpn/udp.c (revision 6bab77ced3ffbce3d6c5b5bcce17da7c8a3f8266)
// SPDX-License-Identifier: GPL-2.0
/*  OpenVPN data channel offload
 *
 *  Copyright (C) 2019-2025 OpenVPN, Inc.
 *
 *  Author:	Antonio Quartulli <antonio@openvpn.net>
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/udp.h>
#include <net/addrconf.h>
#include <net/dst_cache.h>
#include <net/route.h>
#include <net/ipv6_stubs.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>

#include "ovpnpriv.h"
#include "main.h"
#include "bind.h"
#include "io.h"
#include "peer.h"
#include "proto.h"
#include "socket.h"
#include "udp.h"

/* Retrieve the corresponding ovpn object from a UDP socket
 * rcu_read_lock must be held on entry
 */
static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
{
	struct ovpn_socket *ovpn_sock;

	if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP))
		return NULL;

	ovpn_sock = rcu_dereference_sk_user_data(sk);
	if (unlikely(!ovpn_sock))
		return NULL;

	/* make sure that sk matches our stored transport socket */
	if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk))
		return NULL;

	return ovpn_sock;
}

/**
 * ovpn_udp_encap_recv - Start processing a received UDP packet.
 * @sk: socket over which the packet was received
 * @skb: the received packet
 *
 * If the first byte of the payload is:
 * - DATA_V2 the packet is accepted for further processing,
 * - DATA_V1 the packet is dropped as not supported,
 * - anything else the packet is forwarded to the UDP stack for
 *   delivery to user space.
 *
 * Return:
 *  0 if skb was consumed or dropped
 * >0 if skb should be passed up to userspace as UDP (packet not consumed)
 * <0 if skb should be resubmitted as proto -N (packet not consumed)
 */
static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct ovpn_socket *ovpn_sock;
	struct ovpn_priv *ovpn;
	struct ovpn_peer *peer;
	u32 peer_id;
	u8 opcode;

	ovpn_sock = ovpn_socket_from_udp_sock(sk);
	if (unlikely(!ovpn_sock)) {
		net_err_ratelimited("ovpn: %s invoked on non ovpn socket\n",
				    __func__);
		goto drop_noovpn;
	}

	ovpn = ovpn_sock->ovpn;
	if (unlikely(!ovpn)) {
		net_err_ratelimited("ovpn: cannot obtain ovpn object from UDP socket\n");
		goto drop_noovpn;
	}

	/* Make sure the first 4 bytes of the skb data buffer after the UDP
	 * header are accessible.
	 * They are required to fetch the OP code, the key ID and the peer ID.
	 */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) +
				    OVPN_OPCODE_SIZE))) {
		net_dbg_ratelimited("%s: packet too small from UDP socket\n",
				    netdev_name(ovpn->dev));
		goto drop;
	}

	opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr));
	if (unlikely(opcode != OVPN_DATA_V2)) {
		/* DATA_V1 is not supported */
		if (opcode == OVPN_DATA_V1)
			goto drop;

		/* unknown or control packet: let it bubble up to userspace */
		return 1;
	}

	peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr));
	/* some OpenVPN server implementations send data packets with the
	 * peer-id set to UNDEF. In this case we skip the lookup by peer-id
	 * and try to match the peer by its transport address instead
	 */
	if (peer_id == OVPN_PEER_ID_UNDEF)
		peer = ovpn_peer_get_by_transp_addr(ovpn, skb);
	else
		peer = ovpn_peer_get_by_id(ovpn, peer_id);

	if (unlikely(!peer))
		goto drop;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));
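	/* hand the packet to the ovpn RX path: from this point on ovpn_recv()
	 * owns both the skb and the peer reference taken above
	 */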
	ovpn_recv(peer, skb);
	return 0;

drop:
	dev_dstats_rx_dropped(ovpn->dev);
drop_noovpn:
	kfree_skb(skb);
	return 0;
}

/**
 * ovpn_udp4_output - send IPv4 packet over udp socket
 * @peer: the destination peer
 * @bind: the binding related to the destination peer
 * @cache: dst cache
 * @sk: the socket to send the packet over
 * @skb: the packet to send
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
			    struct dst_cache *cache, struct sock *sk,
			    struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl = {
		.saddr = bind->local.ipv4.s_addr,
		.daddr = bind->remote.in4.sin_addr.s_addr,
		.fl4_sport = inet_sk(sk)->inet_sport,
		.fl4_dport = bind->remote.in4.sin_port,
		.flowi4_proto = sk->sk_protocol,
		.flowi4_mark = sk->sk_mark,
	};
	int ret;

	local_bh_disable();
	rt = dst_cache_get_ip4(cache, &fl.saddr);
	if (rt)
		goto transmit;

	if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr,
					RT_SCOPE_HOST))) {
		/* we may end up here when the cached address is no longer
		 * usable. In this case we reset the address/cache and perform
		 * a new lookup
		 */
		fl.saddr = 0;
		spin_lock_bh(&peer->lock);
		bind->local.ipv4.s_addr = 0;
		spin_unlock_bh(&peer->lock);
		dst_cache_reset(cache);
	}

	rt = ip_route_output_flow(sock_net(sk), &fl, sk);
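	/* the lookup is expected to fail with -EINVAL when the (possibly
	 * cached) source address is not valid anymore: in that case clear the
	 * stored local address and retry with an unspecified source
	 */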
	if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) {
		fl.saddr = 0;
		spin_lock_bh(&peer->lock);
		bind->local.ipv4.s_addr = 0;
		spin_unlock_bh(&peer->lock);
		dst_cache_reset(cache);

		rt = ip_route_output_flow(sock_net(sk), &fl, sk);
	}

	if (IS_ERR(rt)) {
		ret = PTR_ERR(rt);
		net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
				    netdev_name(peer->ovpn->dev),
				    &bind->remote.in4,
				    ret);
		goto err;
	}
	dst_cache_set_ip4(cache, &rt->dst, fl.saddr);

transmit:
	udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
			    ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
			    fl.fl4_dport, false, sk->sk_no_check_tx);
	ret = 0;
err:
	local_bh_enable();
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
/**
 * ovpn_udp6_output - send IPv6 packet over udp socket
 * @peer: the destination peer
 * @bind: the binding related to the destination peer
 * @cache: dst cache
 * @sk: the socket to send the packet over
 * @skb: the packet to send
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
			    struct dst_cache *cache, struct sock *sk,
			    struct sk_buff *skb)
{
	struct dst_entry *dst;
	int ret;

	struct flowi6 fl = {
		.saddr = bind->local.ipv6,
		.daddr = bind->remote.in6.sin6_addr,
		.fl6_sport = inet_sk(sk)->inet_sport,
		.fl6_dport = bind->remote.in6.sin6_port,
		.flowi6_proto = sk->sk_protocol,
		.flowi6_mark = sk->sk_mark,
		.flowi6_oif = bind->remote.in6.sin6_scope_id,
	};

	local_bh_disable();
	dst = dst_cache_get_ip6(cache, &fl.saddr);
	if (dst)
		goto transmit;

	if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) {
		/* we may end up here when the cached address is no longer
		 * usable. In this case we reset the address/cache and perform
		 * a new lookup
		 */
		fl.saddr = in6addr_any;
		spin_lock_bh(&peer->lock);
		bind->local.ipv6 = in6addr_any;
		spin_unlock_bh(&peer->lock);
		dst_cache_reset(cache);
	}

	dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
				    netdev_name(peer->ovpn->dev),
				    &bind->remote.in6, ret);
		goto err;
	}
	dst_cache_set_ip6(cache, dst, &fl.saddr);

transmit:
	udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
			     ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
			     fl.fl6_dport, udp_get_no_check6_tx(sk));
	ret = 0;
err:
	local_bh_enable();
	return ret;
}
#endif

/**
 * ovpn_udp_output - transmit skb using udp-tunnel
 * @peer: the destination peer
 * @cache: dst cache
 * @sk: the socket to send the packet over
 * @skb: the packet to send
 *
 * rcu_read_lock should be held on entry.
 * On return, the skb is consumed.
 *
 * Return: 0 on success or a negative error code otherwise
 */
static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
			   struct sock *sk, struct sk_buff *skb)
{
	struct ovpn_bind *bind;
	int ret;

	/* set sk to null if skb is already orphaned */
	if (!skb->destructor)
		skb->sk = NULL;

	rcu_read_lock();
	bind = rcu_dereference(peer->bind);
	if (unlikely(!bind)) {
		net_warn_ratelimited("%s: no bind for remote peer %u\n",
				     netdev_name(peer->ovpn->dev), peer->id);
		ret = -ENODEV;
		goto out;
	}

	switch (bind->remote.in4.sin_family) {
	case AF_INET:
		ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
		break;
#endif
	default:
		ret = -EAFNOSUPPORT;
		break;
	}

out:
	rcu_read_unlock();
	return ret;
}

/**
 * ovpn_udp_send_skb - prepare skb and send it via UDP
 * @peer: the destination peer
 * @sock: the RCU protected peer socket
 * @skb: the packet to send
 */
void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
		       struct sk_buff *skb)
{
	int ret = -1;

	skb->dev = peer->ovpn->dev;
	/* no checksum performed at this layer */
	skb->ip_summed = CHECKSUM_NONE;

	/* get socket info */
	if (unlikely(!sock)) {
		net_warn_ratelimited("%s: no sock for remote peer %u\n",
				     netdev_name(peer->ovpn->dev), peer->id);
		goto out;
	}

	/* crypto layer -> transport (UDP) */
	ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb);
out:
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return;
	}
}

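/* Invoked by the UDP stack via the encap_destroy callback when the transport
 * socket is being destroyed: release every peer still bound to it.
 */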
static void ovpn_udp_encap_destroy(struct sock *sk)
{
	struct ovpn_socket *sock;
	struct ovpn_priv *ovpn;

	rcu_read_lock();
	sock = rcu_dereference_sk_user_data(sk);
	if (!sock || !sock->ovpn) {
		rcu_read_unlock();
		return;
	}
	ovpn = sock->ovpn;
	rcu_read_unlock();

	ovpn_peers_free(ovpn, sk, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
}

/**
 * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
 * @ovpn_sock: socket to configure
 * @ovpn: the openvpn instance to link
 *
 * After invoking this function, the sock will be controlled by ovpn so that
 * any incoming packet may be processed by ovpn first.
 *
 * Return: 0 on success or a negative error code otherwise
 */
int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
			   struct ovpn_priv *ovpn)
{
	struct udp_tunnel_sock_cfg cfg = {
		.encap_type = UDP_ENCAP_OVPNINUDP,
		.encap_rcv = ovpn_udp_encap_recv,
		.encap_destroy = ovpn_udp_encap_destroy,
	};
	struct socket *sock = ovpn_sock->sock;
	struct ovpn_socket *old_data;
	int ret;

	/* make sure no pre-existing encapsulation handler exists */
	rcu_read_lock();
	old_data = rcu_dereference_sk_user_data(sock->sk);
	if (!old_data) {
		/* socket is currently unused - we can take it */
		rcu_read_unlock();
		setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg);
		return 0;
	}

	/* socket is in use. We need to understand if it's owned by this ovpn
	 * instance or by something else.
	 * In the former case, we can increase the refcounter and happily
	 * use it, because the same UDP socket is expected to be shared among
	 * different peers.
	 *
	 * Unlike TCP, a single UDP socket can be used to talk to many remote
	 * hosts, therefore openvpn instantiates only one for all its peers
	 */
	if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
	    old_data->ovpn == ovpn) {
		netdev_dbg(ovpn->dev,
			   "provided socket already owned by this interface\n");
		ret = -EALREADY;
	} else {
		netdev_dbg(ovpn->dev,
			   "provided socket already taken by other user\n");
		ret = -EBUSY;
	}
	rcu_read_unlock();

	return ret;
}

/**
 * ovpn_udp_socket_detach - clean udp-tunnel status for this socket
 * @ovpn_sock: the socket to clean
 */
void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
{
	struct udp_tunnel_sock_cfg cfg = { };

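	/* passing a zeroed config to setup_udp_tunnel_sock() clears
	 * sk_user_data and removes the encap callbacks installed at attach
	 * time, returning the socket to plain UDP behaviour
	 */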
	setup_udp_tunnel_sock(sock_net(ovpn_sock->sock->sk), ovpn_sock->sock,
			      &cfg);
}
440