1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __NET_UDP_TUNNEL_H
3 #define __NET_UDP_TUNNEL_H
4
5 #include <net/ip_tunnels.h>
6 #include <net/udp.h>
7
8 #if IS_ENABLED(CONFIG_IPV6)
9 #include <net/ipv6.h>
10 #endif
11
/* Encapsulation GSO features that must be cleared from gso_type when an
 * skb has already gone through partial segmentation; the >> NETIF_F_GSO_SHIFT
 * converts the NETIF_F_* feature bits into SKB_GSO_* bit positions.
 * See udp_tunnel_handle_partial() below.
 */
#define UDP_TUNNEL_PARTIAL_FEATURES	NETIF_F_GSO_ENCAP_ALL
#define UDP_TUNNEL_STRIPPED_GSO_TYPES	((UDP_TUNNEL_PARTIAL_FEATURES | \
					  NETIF_F_GSO_PARTIAL) >> \
					 NETIF_F_GSO_SHIFT)
16
/* Parameters for creating a kernel UDP tunnel socket, consumed by
 * udp_sock_create4()/udp_sock_create6() below. The address unions are
 * selected by @family: only the member matching the family is meaningful.
 */
struct udp_port_cfg {
	u8 family;	/* AF_INET or AF_INET6, see udp_sock_create() */

	/* Used only for kernel-created sockets */
	union {
		struct in_addr local_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr local_ip6;
#endif
	};

	union {
		struct in_addr peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr peer_ip6;
#endif
	};

	__be16 local_udp_port;
	__be16 peer_udp_port;
	/* NOTE(review): presumably the ifindex to bind the socket to -
	 * confirm against the udp_sock_create4/6 implementations.
	 */
	int bind_ifindex;
	unsigned int use_udp_checksums:1,
		     use_udp6_tx_checksums:1,
		     use_udp6_rx_checksums:1,
		     ipv6_v6only:1;
};
43
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);
#else
/* IPv6 disabled: fail with "protocol family not supported" */
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	return -EPFNOSUPPORT;
}
#endif
57
udp_sock_create(struct net * net,struct udp_port_cfg * cfg,struct socket ** sockp)58 static inline int udp_sock_create(struct net *net,
59 struct udp_port_cfg *cfg,
60 struct socket **sockp)
61 {
62 if (cfg->family == AF_INET)
63 return udp_sock_create4(net, cfg, sockp);
64
65 if (cfg->family == AF_INET6)
66 return udp_sock_create6(net, cfg, sockp);
67
68 return -EPFNOSUPPORT;
69 }
70
/* Callback types a tunnel implementation plugs into a UDP socket through
 * struct udp_tunnel_sock_cfg below.
 */
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
					     struct sk_buff *skb);
typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
					   struct sk_buff *skb, int err,
					   __be16 port, u32 info, u8 *payload);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
						    struct list_head *head,
						    struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);
83
/* Per-socket tunnel configuration passed to setup_udp_tunnel_sock() */
struct udp_tunnel_sock_cfg {
	void *sk_user_data;	/* user data used by encap_rcv call back */
	/* Used for setting up udp_sock fields, see udp.h for details */
	__u8 encap_type;
	udp_tunnel_encap_rcv_t encap_rcv;
	udp_tunnel_encap_err_lookup_t encap_err_lookup;
	udp_tunnel_encap_err_rcv_t encap_err_rcv;
	udp_tunnel_encap_destroy_t encap_destroy;
	udp_tunnel_gro_receive_t gro_receive;
	udp_tunnel_gro_complete_t gro_complete;
};

/* Setup the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *sock_cfg);
99
/* -- List of parsable UDP tunnel types --
 *
 * Adding to this list will result in serious debate.  The main issue is
 * that this list is essentially a list of workarounds for either poorly
 * designed tunnels, or poorly designed device offloads.
 *
 * The parsing supported via these types should really be used for Rx
 * traffic only as the network stack will have already inserted offsets for
 * the location of the headers in the skb.  In addition any ports that are
 * pushed should be kept within the namespace without leaking to other
 * devices such as VFs or other ports on the same device.
 *
 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
 * need to use this for Rx checksum offload.  It should not be necessary to
 * call this function to perform Tx offloads on outgoing traffic.
 */
enum udp_parsable_tunnel_type {
	/* Each value is a single bit, so several types can be OR'ed
	 * together into a mask (see e.g. the tunnel_types table field
	 * in struct udp_tunnel_nic_info below).
	 */
	UDP_TUNNEL_TYPE_VXLAN	  = BIT(0), /* RFC 7348 */
	UDP_TUNNEL_TYPE_GENEVE	  = BIT(1), /* draft-ietf-nvo3-geneve */
	UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
};
121
/* Description of one offloadable UDP tunnel port, handed to devices via
 * the push/drop notifier helpers below.
 */
struct udp_tunnel_info {
	unsigned short type;	/* enum udp_parsable_tunnel_type */
	sa_family_t sa_family;
	__be16 port;		/* UDP port, network byte order */
	u8 hw_priv;		/* presumably set via ->set_port_priv - confirm */
};

/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
136
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck, u16 ipcb_flags);

/* IPv6 counterpart of udp_tunnel_xmit_skb() */
void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			  struct sk_buff *skb,
			  struct net_device *dev,
			  const struct in6_addr *saddr,
			  const struct in6_addr *daddr,
			  __u8 prio, __u8 ttl, __be32 label,
			  __be16 src_port, __be16 dst_port, bool nocheck,
			  u16 ip6cb_flags);
151
udp_tunnel_handle_partial(struct sk_buff * skb)152 static inline bool udp_tunnel_handle_partial(struct sk_buff *skb)
153 {
154 bool double_encap = !!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL);
155
156 /*
157 * If the skb went through partial segmentation, lower devices
158 * will not need to offload the related features - except for
159 * UDP_TUNNEL, that will be re-added by the later
160 * udp_tunnel_handle_offloads().
161 */
162 if (double_encap)
163 skb_shinfo(skb)->gso_type &= ~UDP_TUNNEL_STRIPPED_GSO_TYPES;
164 return double_encap;
165 }
166
/* Record @inner_proto on @skb unless a nested tunnel already did so. */
static inline void udp_tunnel_set_inner_protocol(struct sk_buff *skb,
						 bool double_encap,
						 __be16 inner_proto)
{
	/* When double-encapsulated, the inner protocol has already been
	 * set by the nested tunnel - don't overwrite it.
	 */
	if (double_encap)
		return;

	skb_set_inner_protocol(skb, inner_proto);
}
178
void udp_tunnel_sock_release(struct socket *sock);

/* IPv4 route lookup for the tunnel transmit path */
struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
				     struct net_device *dev,
				     struct net *net, int oif,
				     __be32 *saddr,
				     const struct ip_tunnel_key *key,
				     __be16 sport, __be16 dport, u8 tos,
				     struct dst_cache *dst_cache);
/* IPv6 counterpart of udp_tunnel_dst_lookup() */
struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
					 struct net_device *dev,
					 struct net *net,
					 struct socket *sock, int oif,
					 struct in6_addr *saddr,
					 const struct ip_tunnel_key *key,
					 __be16 sport, __be16 dport, u8 dsfield,
					 struct dst_cache *dst_cache);

/* Build a metadata dst for a packet received on a UDP tunnel socket */
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    const unsigned long *flags,
				    __be64 tunnel_id, int md_size);
200
201 #ifdef CONFIG_INET
/* Prepare @skb for tunnel transmission by adding the UDP_TUNNEL GSO
 * type - with or without UDP checksum offload depending on @udp_csum.
 */
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
	return iptunnel_handle_offloads(skb, udp_csum ?
					     SKB_GSO_UDP_TUNNEL_CSUM :
					     SKB_GSO_UDP_TUNNEL);
}
208 #endif
209
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
#else
/* No-op stubs when the udp_tunnel module is not built */
static inline void udp_tunnel_update_gro_lookup(struct net *net,
						struct sock *sk, bool add) {}
static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
#endif
218
/* Unregister @sk from both UDP tunnel GRO paths (receive hook first,
 * then the socket lookup table).
 */
static inline void udp_tunnel_cleanup_gro(struct sock *sk)
{
	udp_tunnel_update_gro_rcv(sk, false);
	udp_tunnel_update_gro_lookup(sock_net(sk), sk, false);
}
224
/* Enable UDP encapsulation processing for @sk. Idempotent: the
 * test-and-set on ENCAP_ENABLED makes repeat calls return early, so the
 * global enables below run at most once per socket.
 */
static inline void udp_tunnel_encap_enable(struct sock *sk)
{
	if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (READ_ONCE(sk->sk_family) == PF_INET6)
		udpv6_encap_enable();
#endif
	udp_encap_enable();
}
236
/* Maximum number of UDP port tables a single device can expose,
 * see the tables[] array in struct udp_tunnel_nic_info below.
 */
#define UDP_TUNNEL_NIC_MAX_TABLES	4

enum udp_tunnel_nic_info_flags {
	/* Device only supports offloads when it's open, all ports
	 * will be removed before close and re-added after open.
	 */
	UDP_TUNNEL_NIC_INFO_OPEN_ONLY	= BIT(0),
	/* Device supports only IPv4 tunnels */
	UDP_TUNNEL_NIC_INFO_IPV4_ONLY	= BIT(1),
	/* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
	 * This port must not be counted towards n_entries of any table.
	 * Driver will not receive any callback associated with port 4789.
	 */
	UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(2),
};
252
struct udp_tunnel_nic;

/* Upper bound on the number of netdevs sharing one UDP port table */
#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES	(U16_MAX / 2)

/* State shared between netdevs whose driver shares one port table;
 * allocated by the driver and pointed at from udp_tunnel_nic_info.
 */
struct udp_tunnel_nic_shared {
	struct udp_tunnel_nic *udp_tunnel_nic_info;

	struct list_head devices;	/* of struct udp_tunnel_nic_shared_node */
};

/* One entry on the shared ->devices list above */
struct udp_tunnel_nic_shared_node {
	struct net_device *dev;
	struct list_head list;
};
267
268 /**
269 * struct udp_tunnel_nic_info - driver UDP tunnel offload information
270 * @set_port: callback for adding a new port
271 * @unset_port: callback for removing a port
272 * @sync_table: callback for syncing the entire port table at once
273 * @shared: reference to device global state (optional)
274 * @flags: device flags from enum udp_tunnel_nic_info_flags
275 * @tables: UDP port tables this device has
276 * @tables.n_entries: number of entries in this table
277 * @tables.tunnel_types: types of tunnels this table accepts
278 *
279 * Drivers are expected to provide either @set_port and @unset_port callbacks
280 * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
281 *
282 * Devices which (misguidedly) share the UDP tunnel port table across multiple
283 * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
284 * point @shared at it.
285 * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
286 * sharing a table.
287 *
288 * Known limitations:
289 * - UDP tunnel port notifications are fundamentally best-effort -
290 * it is likely the driver will both see skbs which use a UDP tunnel port,
291 * while not being a tunneled skb, and tunnel skbs from other ports -
292 * drivers should only use these ports for non-critical RX-side offloads,
293 * e.g. the checksum offload;
294 * - none of the devices care about the socket family at present, so we don't
295 * track it. Please extend this code if you care.
296 */
struct udp_tunnel_nic_info {
	/* one-by-one port add/remove callbacks */
	int (*set_port)(struct net_device *dev,
			unsigned int table, unsigned int entry,
			struct udp_tunnel_info *ti);
	int (*unset_port)(struct net_device *dev,
			  unsigned int table, unsigned int entry,
			  struct udp_tunnel_info *ti);

	/* all at once - alternative to set_port/unset_port */
	int (*sync_table)(struct net_device *dev, unsigned int table);

	struct udp_tunnel_nic_shared *shared;	/* optional, see kerneldoc above */

	unsigned int flags;	/* enum udp_tunnel_nic_info_flags */

	struct udp_tunnel_nic_table_info {
		unsigned int n_entries;		/* number of port slots */
		unsigned int tunnel_types;	/* mask of accepted types */
	} tables[UDP_TUNNEL_NIC_MAX_TABLES];
};
318
319 /* UDP tunnel module dependencies
320 *
321 * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
322 * module. NIC drivers are not, they just attach their
323 * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
324 * Loading a tunnel driver will cause the udp_tunnel module to be loaded
325 * and only then will all the required state structures be allocated.
326 * Since we want a weak dependency from the drivers and the core to udp_tunnel
327 * we call things through the following stubs.
328 */
/* Function table provided by the udp_tunnel_nic core; NULL when the
 * module state is unavailable (see the stubbed helpers below).
 */
struct udp_tunnel_nic_ops {
	void (*get_port)(struct net_device *dev, unsigned int table,
			 unsigned int idx, struct udp_tunnel_info *ti);
	void (*set_port_priv)(struct net_device *dev, unsigned int table,
			      unsigned int idx, u8 priv);
	void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
	void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
	void (*reset_ntf)(struct net_device *dev);

	/* ethtool-style dump of a port table; callers take ->lock around
	 * dump_size/dump_write via the inline wrappers below
	 */
	size_t (*dump_size)(struct net_device *dev, unsigned int table);
	int (*dump_write)(struct net_device *dev, unsigned int table,
			  struct sk_buff *skb);
	void (*assert_locked)(struct net_device *dev);
	void (*lock)(struct net_device *dev);
	void (*unlock)(struct net_device *dev);
};
345
#ifdef CONFIG_INET
extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
#else
/* NULL ops pointer: every inline helper below becomes a no-op */
#define udp_tunnel_nic_ops	((struct udp_tunnel_nic_ops *)NULL)
#endif
351
static inline void
udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			unsigned int idx, struct udp_tunnel_info *ti)
{
	/* This helper is used from .sync_table, we indicate empty entries
	 * by zeroed @ti. Drivers which need to know the details of a port
	 * when it gets deleted should use the .set_port / .unset_port
	 * callbacks.
	 * Zero out here, otherwise !CONFIG_INET causes uninitialized
	 * warnings.
	 */
	memset(ti, 0, sizeof(*ti));

	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
}
367
368 static inline void
udp_tunnel_nic_set_port_priv(struct net_device * dev,unsigned int table,unsigned int idx,u8 priv)369 udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
370 unsigned int idx, u8 priv)
371 {
372 if (udp_tunnel_nic_ops) {
373 udp_tunnel_nic_ops->assert_locked(dev);
374 udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
375 }
376 }
377
udp_tunnel_nic_assert_locked(struct net_device * dev)378 static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
379 {
380 if (udp_tunnel_nic_ops)
381 udp_tunnel_nic_ops->assert_locked(dev);
382 }
383
udp_tunnel_nic_lock(struct net_device * dev)384 static inline void udp_tunnel_nic_lock(struct net_device *dev)
385 {
386 if (udp_tunnel_nic_ops)
387 udp_tunnel_nic_ops->lock(dev);
388 }
389
udp_tunnel_nic_unlock(struct net_device * dev)390 static inline void udp_tunnel_nic_unlock(struct net_device *dev)
391 {
392 if (udp_tunnel_nic_ops)
393 udp_tunnel_nic_ops->unlock(dev);
394 }
395
396 static inline void
udp_tunnel_nic_add_port(struct net_device * dev,struct udp_tunnel_info * ti)397 udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
398 {
399 if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
400 return;
401 if (udp_tunnel_nic_ops)
402 udp_tunnel_nic_ops->add_port(dev, ti);
403 }
404
405 static inline void
udp_tunnel_nic_del_port(struct net_device * dev,struct udp_tunnel_info * ti)406 udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
407 {
408 if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
409 return;
410 if (udp_tunnel_nic_ops)
411 udp_tunnel_nic_ops->del_port(dev, ti);
412 }
413
414 /**
415 * udp_tunnel_nic_reset_ntf() - device-originating reset notification
416 * @dev: network interface device structure
417 *
418 * Called by the driver to inform the core that the entire UDP tunnel port
419 * state has been lost, usually due to device reset. Core will assume device
420 * forgot all the ports and issue .set_port and .sync_table callbacks as
421 * necessary.
422 *
423 * This function must be called with rtnl lock held, and will issue all
424 * the callbacks before returning.
425 */
udp_tunnel_nic_reset_ntf(struct net_device * dev)426 static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
427 {
428 if (udp_tunnel_nic_ops)
429 udp_tunnel_nic_ops->reset_ntf(dev);
430 }
431
432 static inline size_t
udp_tunnel_nic_dump_size(struct net_device * dev,unsigned int table)433 udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
434 {
435 size_t ret;
436
437 if (!udp_tunnel_nic_ops)
438 return 0;
439
440 udp_tunnel_nic_ops->lock(dev);
441 ret = udp_tunnel_nic_ops->dump_size(dev, table);
442 udp_tunnel_nic_ops->unlock(dev);
443
444 return ret;
445 }
446
447 static inline int
udp_tunnel_nic_dump_write(struct net_device * dev,unsigned int table,struct sk_buff * skb)448 udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
449 struct sk_buff *skb)
450 {
451 int ret;
452
453 if (!udp_tunnel_nic_ops)
454 return 0;
455
456 udp_tunnel_nic_ops->lock(dev);
457 ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
458 udp_tunnel_nic_ops->unlock(dev);
459
460 return ret;
461 }
462
udp_tunnel_get_rx_info(struct net_device * dev)463 static inline void udp_tunnel_get_rx_info(struct net_device *dev)
464 {
465 ASSERT_RTNL();
466 if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
467 return;
468 udp_tunnel_nic_assert_locked(dev);
469 call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
470 }
471
udp_tunnel_drop_rx_info(struct net_device * dev)472 static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
473 {
474 ASSERT_RTNL();
475 if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
476 return;
477 udp_tunnel_nic_assert_locked(dev);
478 call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
479 }
480
481 #endif
482