// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_dscp.h>

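/* Create and configure a kernel UDP/IPv4 socket according to @cfg:
 * optionally bind it to a device and to the requested local address/port,
 * optionally connect it to a peer, and apply the TX checksum policy.
 * On failure any partially set up socket is shut down and released and
 * *sockp is set to NULL.
 */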
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp)
{
	int err;
	struct socket *sock = NULL;
	struct sockaddr_in udp_addr;

	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
	if (err < 0)
		goto error;

	if (cfg->bind_ifindex) {
		err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
		if (err < 0)
			goto error;
	}

	udp_addr.sin_family = AF_INET;
	udp_addr.sin_addr = cfg->local_ip;
	udp_addr.sin_port = cfg->local_udp_port;
	err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
			  sizeof(udp_addr));
	if (err < 0)
		goto error;

	if (cfg->peer_udp_port) {
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = cfg->peer_udp_port;
		err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
				     sizeof(udp_addr), 0);
		if (err < 0)
			goto error;
	}

	sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

	*sockp = sock;
	return 0;

error:
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
	}
	*sockp = NULL;
	return err;
}
EXPORT_SYMBOL(udp_sock_create4);

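/* True if the socket is bound to the wildcard (any) local address. */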
static bool sk_saddr_any(struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#else
	return !sk->sk_rcv_saddr;
#endif
}

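/* Turn an already created UDP socket into a tunnel socket: disable
 * multicast loopback, enable checksum conversion, attach the caller's
 * sk_user_data and install the encapsulation/GRO callbacks from @cfg,
 * then enable UDP encapsulation processing and, for unconnected wildcard
 * kernel sockets, the GRO lookup optimization.
 *
 * Typical driver usage together with udp_sock_create4() (a minimal sketch
 * only; MY_PORT, my_encap_rcv and the encap_type value are hypothetical
 * and depend on the tunnel protocol):
 *
 *	struct udp_port_cfg port_cfg = {
 *		.local_udp_port	= htons(MY_PORT),
 *	};
 *	struct udp_tunnel_sock_cfg tnl_cfg = {
 *		.encap_type	= 1,
 *		.encap_rcv	= my_encap_rcv,
 *	};
 *	struct socket *sock;
 *	int err;
 *
 *	err = udp_sock_create4(net, &port_cfg, &sock);
 *	if (err < 0)
 *		return err;
 *	setup_udp_tunnel_sock(net, sock, &tnl_cfg);
 */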
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *cfg)
{
	struct sock *sk = sock->sk;

	/* Disable multicast loopback */
	inet_clear_bit(MC_LOOP, sk);

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(sk);

	rcu_assign_sk_user_data(sk, cfg->sk_user_data);

	udp_sk(sk)->encap_type = cfg->encap_type;
	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
	udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
	udp_sk(sk)->gro_receive = cfg->gro_receive;
	udp_sk(sk)->gro_complete = cfg->gro_complete;

	udp_tunnel_encap_enable(sk);

	udp_tunnel_update_gro_rcv(sk, true);

	if (!sk->sk_dport && !sk->sk_bound_dev_if && sk_saddr_any(sk) &&
	    sk->sk_kern_sock)
		udp_tunnel_update_gro_lookup(net, sk, true);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);

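/* Advertise the socket's local UDP port for tunnel offload of the given
 * @type to a single device via udp_tunnel_nic.
 */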
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_add_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);

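/* Withdraw the socket's local UDP port of the given tunnel @type from a
 * single device.
 */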
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_del_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port);

/* Notify netdevs that UDP port started listening */
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_add_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);

/* Notify netdevs that UDP port stopped listening */
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_del_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);

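/* Prepend and fill in the UDP header on @skb, set up the UDP checksum,
 * and hand the packet to iptunnel_xmit() for IPv4 encapsulated
 * transmission.
 */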
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck, u16 ipcb_flags)
{
	struct udphdr *uh;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;
	uh->len = htons(skb->len);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	udp_set_csum(nocheck, skb, src, dst, skb->len);

	iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet,
		      ipcb_flags);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);

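/* Detach the tunnel's sk_user_data, wait for in-flight RCU readers that
 * may still dereference it, then shut down and release the socket.
 */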
void udp_tunnel_sock_release(struct socket *sock)
{
	rcu_assign_sk_user_data(sock->sk, NULL);
	synchronize_rcu();
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);

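/* Build a metadata dst for a received tunnel packet (IPv4 or IPv6 outer
 * header) and record the outer UDP ports and checksum presence in the
 * tunnel key.
 */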
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb,  unsigned short family,
				    const unsigned long *flags,
				    __be64 tunnel_id, int md_size)
{
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *info;

	if (family == AF_INET)
		tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
	else
		tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
	if (!tun_dst)
		return NULL;

	info = &tun_dst->u.tun_info;
	info->key.tp_src = udp_hdr(skb)->source;
	info->key.tp_dst = udp_hdr(skb)->dest;
	if (udp_hdr(skb)->check)
		__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);

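/* Resolve the IPv4 route for an encapsulated packet, preferring a cached
 * entry when CONFIG_DST_CACHE is enabled, and reject routes that loop
 * back to the tunnel device itself.  On success the chosen source address
 * is returned via @saddr.
 *
 * A typical TX path pairs this with udp_tunnel_xmit_skb() (a minimal
 * sketch only; skb, dev, net, sock, key, dst_cache and the
 * port/tos/ttl/df values are assumed to come from the caller's tunnel
 * state):
 *
 *	__be32 saddr;
 *	struct rtable *rt;
 *
 *	rt = udp_tunnel_dst_lookup(skb, dev, net, 0, &saddr, key,
 *				   src_port, dst_port, tos, dst_cache);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 *	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, key->u.ipv4.dst,
 *			    tos, ttl, df, src_port, dst_port, false,
 *			    false, 0);
 */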
struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
				     struct net_device *dev,
				     struct net *net, int oif,
				     __be32 *saddr,
				     const struct ip_tunnel_key *key,
				     __be16 sport, __be16 dport, u8 tos,
				     struct dst_cache *dst_cache)
{
	struct rtable *rt = NULL;
	struct flowi4 fl4;

#ifdef CONFIG_DST_CACHE
	if (dst_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}
#endif

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.flowi4_oif = oif;
	fl4.daddr = key->u.ipv4.dst;
	fl4.saddr = key->u.ipv4.src;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;
	fl4.flowi4_tos = tos & INET_DSCP_MASK;
	fl4.flowi4_flags = key->flow_flags;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
#ifdef CONFIG_DST_CACHE
	if (dst_cache)
		dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
#endif
	*saddr = fl4.saddr;
	return rt;
}
EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup);

MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");