xref: /linux/include/net/udp_tunnel.h (revision 0c09e89f6cea6598439edca7ff1ef97fde3edb46)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H

#include <net/ip_tunnels.h>
#include <net/udp.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ipv6_stubs.h>
#endif

#define UDP_TUNNEL_PARTIAL_FEATURES	NETIF_F_GSO_ENCAP_ALL
#define UDP_TUNNEL_STRIPPED_GSO_TYPES	((UDP_TUNNEL_PARTIAL_FEATURES |	\
					  NETIF_F_GSO_PARTIAL) >>	\
					 NETIF_F_GSO_SHIFT)

struct udp_port_cfg {
	u8			family;

	/* Used only for kernel-created sockets */
	union {
		struct in_addr		local_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		local_ip6;
#endif
	};

	union {
		struct in_addr		peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		peer_ip6;
#endif
	};

	__be16			local_udp_port;
	__be16			peer_udp_port;
	int			bind_ifindex;
	unsigned int		use_udp_checksums:1,
				use_udp6_tx_checksums:1,
				use_udp6_rx_checksums:1,
				ipv6_v6only:1;
};

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	return 0;
}
#endif

static inline int udp_sock_create(struct net *net,
				  struct udp_port_cfg *cfg,
				  struct socket **sockp)
{
	if (cfg->family == AF_INET)
		return udp_sock_create4(net, cfg, sockp);

	if (cfg->family == AF_INET6)
		return udp_sock_create6(net, cfg, sockp);

	return -EPFNOSUPPORT;
}
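
/*
 * Example (illustrative sketch, not part of the original API docs): a
 * hypothetical tunnel driver could create its kernel UDP socket roughly
 * like this, binding the IANA VXLAN port on any local address:
 *
 *	struct udp_port_cfg udp_conf = {
 *		.family			= AF_INET,
 *		.local_ip.s_addr	= htonl(INADDR_ANY),
 *		.local_udp_port		= htons(4789),
 *		.use_udp_checksums	= true,
 *	};
 *	struct socket *sock;
 *	int err;
 *
 *	err = udp_sock_create(net, &udp_conf, &sock);
 *	if (err < 0)
 *		return err;
 *
 * The created socket is owned by the caller and is eventually released
 * with udp_tunnel_sock_release(), declared further down in this header.
 */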

typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
					     struct sk_buff *skb);
typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
					   struct sk_buff *skb, int err,
					   __be16 port, u32 info, u8 *payload);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
						    struct list_head *head,
						    struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);

struct udp_tunnel_sock_cfg {
	void *sk_user_data;     /* user data used by encap_rcv call back */
	/* Used for setting up udp_sock fields, see udp.h for details */
	__u8  encap_type;
	udp_tunnel_encap_rcv_t encap_rcv;
	udp_tunnel_encap_err_lookup_t encap_err_lookup;
	udp_tunnel_encap_err_rcv_t encap_err_rcv;
	udp_tunnel_encap_destroy_t encap_destroy;
	udp_tunnel_gro_receive_t gro_receive;
	udp_tunnel_gro_complete_t gro_complete;
};

/* Setup the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *sock_cfg);
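
/*
 * Sketch (assumption, mirroring how existing tunnel drivers use this API):
 * after udp_sock_create() the driver registers its encapsulation callbacks
 * on the socket, e.g.:
 *
 *	struct udp_tunnel_sock_cfg tunnel_cfg = {
 *		.sk_user_data	= my_tunnel,
 *		.encap_type	= 1,
 *		.encap_rcv	= my_tunnel_encap_rcv,
 *		.encap_destroy	= my_tunnel_destroy,
 *	};
 *
 *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 *
 * my_tunnel, my_tunnel_encap_rcv() and my_tunnel_destroy() are hypothetical
 * names; an encap_type of 1 is what existing UDP tunnel drivers pass (see
 * udp.h). The encap_rcv callback later retrieves my_tunnel through
 * sk->sk_user_data.
 */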

/* -- List of parsable UDP tunnel types --
 *
 * Adding to this list will result in serious debate.  The main issue is
 * that this list is essentially a list of workarounds for either poorly
 * designed tunnels, or poorly designed device offloads.
 *
 * The parsing supported via these types should really be used for Rx
 * traffic only as the network stack will have already inserted offsets for
 * the location of the headers in the skb.  In addition any ports that are
 * pushed should be kept within the namespace without leaking to other
 * devices such as VFs or other ports on the same device.
 *
 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
 * need to use this for Rx checksum offload.  It should not be necessary to
 * call this function to perform Tx offloads on outgoing traffic.
 */
enum udp_parsable_tunnel_type {
	UDP_TUNNEL_TYPE_VXLAN	  = BIT(0), /* RFC 7348 */
	UDP_TUNNEL_TYPE_GENEVE	  = BIT(1), /* draft-ietf-nvo3-geneve */
	UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
};

struct udp_tunnel_info {
	unsigned short type;
	sa_family_t sa_family;
	__be16 port;
	u8 hw_priv;
};

/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
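
/*
 * Sketch (assumption): a tunnel driver that wants capable NICs to recognise
 * its RX port would typically call, once its socket is set up:
 *
 *	udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
 *
 * and mirror it with udp_tunnel_notify_del_rx_port() before the socket is
 * released.
 */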

/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck, u16 ipcb_flags);

void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			  struct sk_buff *skb,
			  struct net_device *dev,
			  const struct in6_addr *saddr,
			  const struct in6_addr *daddr,
			  __u8 prio, __u8 ttl, __be32 label,
			  __be16 src_port, __be16 dst_port, bool nocheck,
			  u16 ip6cb_flags);
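
/*
 * Sketch (assumption): on the IPv4 TX path a tunnel driver typically looks
 * up a route (e.g. via udp_tunnel_dst_lookup(), declared further down),
 * pushes its own tunnel header onto the skb and then hands the packet to
 * the stack roughly as:
 *
 *	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, daddr, tos, ttl, df,
 *			    src_port, dst_port, xnet, !udp_csum, 0);
 *
 * saddr, daddr, udp_csum etc. stand for values the driver has already
 * resolved for this packet.
 */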

static inline bool udp_tunnel_handle_partial(struct sk_buff *skb)
{
	bool double_encap = !!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL);

	/*
	 * If the skb went through partial segmentation, lower devices
	 * will not need to offload the related features - except for
	 * UDP_TUNNEL, which will be re-added by a later
	 * udp_tunnel_handle_offloads() call.
	 */
	if (double_encap)
		skb_shinfo(skb)->gso_type &= ~UDP_TUNNEL_STRIPPED_GSO_TYPES;
	return double_encap;
}

static inline void udp_tunnel_set_inner_protocol(struct sk_buff *skb,
						 bool double_encap,
						 __be16 inner_proto)
{
	/*
	 * The inner protocol has already been set by the nested tunnel,
	 * don't override it.
	 */
	if (!double_encap)
		skb_set_inner_protocol(skb, inner_proto);
}
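
/*
 * Sketch (assumption): the two helpers above are meant to bracket the
 * GSO/offload setup on the encapsulation TX path, e.g.:
 *
 *	bool double_encap = udp_tunnel_handle_partial(skb);
 *	int err;
 *
 *	err = udp_tunnel_handle_offloads(skb, udp_csum);
 *	if (err)
 *		return err;
 *	udp_tunnel_set_inner_protocol(skb, double_encap, htons(ETH_P_TEB));
 *
 * udp_csum is an assumed per-tunnel flag; ETH_P_TEB is only an example
 * inner protocol (an Ethernet payload, as used by VXLAN-like tunnels).
 */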

void udp_tunnel_sock_release(struct socket *sock);

struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
				     struct net_device *dev,
				     struct net *net, int oif,
				     __be32 *saddr,
				     const struct ip_tunnel_key *key,
				     __be16 sport, __be16 dport, u8 tos,
				     struct dst_cache *dst_cache);
struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
					 struct net_device *dev,
					 struct net *net,
					 struct socket *sock, int oif,
					 struct in6_addr *saddr,
					 const struct ip_tunnel_key *key,
					 __be16 sport, __be16 dport, u8 dsfield,
					 struct dst_cache *dst_cache);

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    const unsigned long *flags,
				    __be64 tunnel_id, int md_size);
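
/*
 * Sketch (assumption): collect_md style tunnels build an RX metadata dst
 * from the outer headers before passing the packet up, e.g. for IPv4:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *	struct metadata_dst *tun_dst;
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 *	tun_dst = udp_tun_rx_dst(skb, AF_INET, flags, tunnel_id, 0);
 *	if (!tun_dst)
 *		goto drop;
 *	skb_dst_set(skb, &tun_dst->dst);
 *
 * tunnel_id stands for the driver's decoded, big-endian tunnel key; the
 * last argument is the amount of extra tunnel metadata to reserve.
 */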

#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

	return iptunnel_handle_offloads(skb, type);
}
#endif

#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
#else
static inline void udp_tunnel_update_gro_lookup(struct net *net,
						struct sock *sk, bool add) {}
static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
#endif

static inline void udp_tunnel_cleanup_gro(struct sock *sk)
{
	udp_tunnel_update_gro_rcv(sk, false);
	udp_tunnel_update_gro_lookup(sock_net(sk), sk, false);
}

static inline void udp_tunnel_encap_enable(struct sock *sk)
{
	if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (READ_ONCE(sk->sk_family) == PF_INET6)
		ipv6_stub->udpv6_encap_enable();
#endif
	udp_encap_enable();
}

#define UDP_TUNNEL_NIC_MAX_TABLES	4

enum udp_tunnel_nic_info_flags {
	/* Device only supports offloads when it's open, all ports
	 * will be removed before close and re-added after open.
	 */
	UDP_TUNNEL_NIC_INFO_OPEN_ONLY	= BIT(0),
	/* Device supports only IPv4 tunnels */
	UDP_TUNNEL_NIC_INFO_IPV4_ONLY	= BIT(1),
	/* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
	 * This port must not be counted towards n_entries of any table.
	 * Driver will not receive any callback associated with port 4789.
	 */
	UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN	= BIT(2),
};

struct udp_tunnel_nic;

#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES	(U16_MAX / 2)

struct udp_tunnel_nic_shared {
	struct udp_tunnel_nic *udp_tunnel_nic_info;

	struct list_head devices;
};

struct udp_tunnel_nic_shared_node {
	struct net_device *dev;
	struct list_head list;
};

/**
 * struct udp_tunnel_nic_info - driver UDP tunnel offload information
 * @set_port:	callback for adding a new port
 * @unset_port:	callback for removing a port
 * @sync_table:	callback for syncing the entire port table at once
 * @shared:	reference to device global state (optional)
 * @flags:	device flags from enum udp_tunnel_nic_info_flags
 * @tables:	UDP port tables this device has
 * @tables.n_entries:		number of entries in this table
 * @tables.tunnel_types:	types of tunnels this table accepts
 *
 * Drivers are expected to provide either @set_port and @unset_port callbacks
 * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
 *
 * Devices which (misguidedly) share the UDP tunnel port table across multiple
 * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
 * point @shared at it.
 * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
 * sharing a table.
 *
 * Known limitations:
 *  - UDP tunnel port notifications are fundamentally best-effort -
 *    the driver is likely to see both skbs which use a UDP tunnel port
 *    yet are not tunneled, and tunneled skbs arriving on other ports -
 *    drivers should only use these ports for non-critical RX-side offloads,
 *    e.g. the checksum offload;
 *  - none of the devices care about the socket family at present, so we don't
 *    track it. Please extend this code if you care.
 */
struct udp_tunnel_nic_info {
	/* one-by-one */
	int (*set_port)(struct net_device *dev,
			unsigned int table, unsigned int entry,
			struct udp_tunnel_info *ti);
	int (*unset_port)(struct net_device *dev,
			  unsigned int table, unsigned int entry,
			  struct udp_tunnel_info *ti);

	/* all at once */
	int (*sync_table)(struct net_device *dev, unsigned int table);

	struct udp_tunnel_nic_shared *shared;

	unsigned int flags;

	struct udp_tunnel_nic_table_info {
		unsigned int n_entries;
		unsigned int tunnel_types;
	} tables[UDP_TUNNEL_NIC_MAX_TABLES];
};
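
/*
 * Sketch (assumption, following the pattern used by existing NIC drivers):
 * a driver advertises its capabilities with a static instance and points
 * the netdev at it before registration, e.g.:
 *
 *	static const struct udp_tunnel_nic_info my_udp_tunnels = {
 *		.set_port	= my_udp_tunnel_set_port,
 *		.unset_port	= my_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
 *		.tables		= {
 *			{
 *				.n_entries	= 4,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
 *						  UDP_TUNNEL_TYPE_GENEVE,
 *			},
 *		},
 *	};
 *
 *	netdev->udp_tunnel_nic_info = &my_udp_tunnels;
 *
 * my_udp_tunnel_set_port() / my_udp_tunnel_unset_port() are hypothetical
 * driver callbacks; the core then invokes them as ports come and go.
 */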

/* UDP tunnel module dependencies
 *
 * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
 * module. NIC drivers are not; they just attach their
 * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
 * Loading a tunnel driver will cause the udp_tunnel module to be loaded
 * and only then will all the required state structures be allocated.
 * Since we want a weak dependency from the drivers and the core to udp_tunnel,
 * we call things through the following stubs.
 */
struct udp_tunnel_nic_ops {
	void (*get_port)(struct net_device *dev, unsigned int table,
			 unsigned int idx, struct udp_tunnel_info *ti);
	void (*set_port_priv)(struct net_device *dev, unsigned int table,
			      unsigned int idx, u8 priv);
	void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
	void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
	void (*reset_ntf)(struct net_device *dev);

	size_t (*dump_size)(struct net_device *dev, unsigned int table);
	int (*dump_write)(struct net_device *dev, unsigned int table,
			  struct sk_buff *skb);
	void (*assert_locked)(struct net_device *dev);
	void (*lock)(struct net_device *dev);
	void (*unlock)(struct net_device *dev);
};

#ifdef CONFIG_INET
extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
#else
#define udp_tunnel_nic_ops	((struct udp_tunnel_nic_ops *)NULL)
#endif

static inline void
udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			unsigned int idx, struct udp_tunnel_info *ti)
{
	/* This helper is used from .sync_table; we indicate empty entries
	 * by a zeroed @ti. Drivers which need to know the details of a port
	 * when it gets deleted should use the .set_port / .unset_port
	 * callbacks.
	 * Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
	 */
	memset(ti, 0, sizeof(*ti));

	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
}
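
/*
 * Sketch (assumption): a .sync_table implementation would typically walk
 * the table with the helper above and reprogram the device for each entry,
 * e.g.:
 *
 *	unsigned int i;
 *	int err;
 *
 *	for (i = 0; i < dev->udp_tunnel_nic_info->tables[table].n_entries; i++) {
 *		struct udp_tunnel_info ti;
 *
 *		udp_tunnel_nic_get_port(dev, table, i, &ti);
 *		if (ti.port)
 *			err = my_hw_program_entry(dev, table, i, &ti);
 *		else
 *			err = my_hw_clear_entry(dev, table, i);
 *		if (err)
 *			return err;
 *	}
 *
 * my_hw_program_entry() / my_hw_clear_entry() stand in for hypothetical
 * device-specific helpers.
 */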

static inline void
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
			     unsigned int idx, u8 priv)
{
	if (udp_tunnel_nic_ops) {
		udp_tunnel_nic_ops->assert_locked(dev);
		udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
	}
}

static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->assert_locked(dev);
}

static inline void udp_tunnel_nic_lock(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->lock(dev);
}

static inline void udp_tunnel_nic_unlock(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->unlock(dev);
}

static inline void
udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->add_port(dev, ti);
}

static inline void
udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->del_port(dev, ti);
}

/**
 * udp_tunnel_nic_reset_ntf() - device-originating reset notification
 * @dev: network interface device structure
 *
 * Called by the driver to inform the core that the entire UDP tunnel port
 * state has been lost, usually due to device reset. Core will assume device
 * forgot all the ports and issue .set_port and .sync_table callbacks as
 * necessary.
 *
 * This function must be called with rtnl lock held, and will issue all
 * the callbacks before returning.
 */
static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->reset_ntf(dev);
}
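
/*
 * e.g. (sketch, assuming a driver with a full re-initialisation path):
 *
 *	rtnl_lock();
 *	my_device_reinit(dev);
 *	udp_tunnel_nic_reset_ntf(dev);
 *	rtnl_unlock();
 *
 * my_device_reinit() is a hypothetical stand-in for the driver's own
 * reset handling.
 */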

static inline size_t
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
	size_t ret;

	if (!udp_tunnel_nic_ops)
		return 0;

	udp_tunnel_nic_ops->lock(dev);
	ret = udp_tunnel_nic_ops->dump_size(dev, table);
	udp_tunnel_nic_ops->unlock(dev);

	return ret;
}

static inline int
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
			  struct sk_buff *skb)
{
	int ret;

	if (!udp_tunnel_nic_ops)
		return 0;

	udp_tunnel_nic_ops->lock(dev);
	ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
	udp_tunnel_nic_ops->unlock(dev);

	return ret;
}

static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	udp_tunnel_nic_assert_locked(dev);
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}

static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	udp_tunnel_nic_assert_locked(dev);
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}

#endif