// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_dscp.h>

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp)
{
	int err;
	struct socket *sock = NULL;
	struct sockaddr_in udp_addr;

	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
	if (err < 0)
		goto error;

	if (cfg->bind_ifindex) {
		err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
		if (err < 0)
			goto error;
	}

	udp_addr.sin_family = AF_INET;
	udp_addr.sin_addr = cfg->local_ip;
	udp_addr.sin_port = cfg->local_udp_port;
	err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
			  sizeof(udp_addr));
	if (err < 0)
		goto error;

	if (cfg->peer_udp_port) {
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = cfg->peer_udp_port;
		err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
				     sizeof(udp_addr), 0);
		if (err < 0)
			goto error;
	}

	sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

	*sockp = sock;
	return 0;

error:
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
	}
	*sockp = NULL;
	return err;
}
EXPORT_SYMBOL(udp_sock_create4);

static bool sk_saddr_any(struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#else
	return !sk->sk_rcv_saddr;
#endif
}

void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *cfg)
{
	struct sock *sk = sock->sk;

	/* Disable multicast loopback */
	inet_clear_bit(MC_LOOP, sk);

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(sk);

	rcu_assign_sk_user_data(sk, cfg->sk_user_data);

	udp_sk(sk)->encap_type = cfg->encap_type;
	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
	udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
	udp_sk(sk)->gro_receive = cfg->gro_receive;
	udp_sk(sk)->gro_complete = cfg->gro_complete;

	udp_tunnel_encap_enable(sk);

	udp_tunnel_update_gro_rcv(sk, true);

	if (!sk->sk_dport && !sk->sk_bound_dev_if && sk_saddr_any(sk) &&
	    sk->sk_kern_sock)
		udp_tunnel_update_gro_lookup(net, sk, true);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);

void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_add_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);

void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_del_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port);

/* Notify netdevs that UDP port started listening */
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_add_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);

/* Notify netdevs that UDP port is no longer listening */
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_del_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);

void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck)
{
	struct udphdr *uh;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;
	uh->len = htons(skb->len);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	udp_set_csum(nocheck, skb, src, dst, skb->len);

	iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);

void udp_tunnel_sock_release(struct socket *sock)
{
	rcu_assign_sk_user_data(sock->sk, NULL);
	synchronize_rcu();
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    const unsigned long *flags,
				    __be64 tunnel_id, int md_size)
{
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *info;

	if (family == AF_INET)
		tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
	else
		tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
	if (!tun_dst)
		return NULL;

	info = &tun_dst->u.tun_info;
	info->key.tp_src = udp_hdr(skb)->source;
	info->key.tp_dst = udp_hdr(skb)->dest;
	if (udp_hdr(skb)->check)
		__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);

struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
				     struct net_device *dev,
				     struct net *net, int oif,
				     __be32 *saddr,
				     const struct ip_tunnel_key *key,
				     __be16 sport, __be16 dport, u8 tos,
				     struct dst_cache *dst_cache)
{
	struct rtable *rt = NULL;
	struct flowi4 fl4;

#ifdef CONFIG_DST_CACHE
	if (dst_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}
#endif

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.flowi4_oif = oif;
	fl4.daddr = key->u.ipv4.dst;
	fl4.saddr = key->u.ipv4.src;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;
	fl4.flowi4_tos = tos & INET_DSCP_MASK;
	fl4.flowi4_flags = key->flow_flags;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
#ifdef CONFIG_DST_CACHE
	if (dst_cache)
		dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
#endif
	*saddr = fl4.saddr;
	return rt;
}
EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup);

MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");
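
/*
 * Illustrative usage sketch, kept out of the build with #if 0: a rough
 * picture of how a UDP tunnel driver (vxlan, geneve, and similar) consumes
 * udp_sock_create4() and setup_udp_tunnel_sock() above.  foo_udp_recv() and
 * foo_open_sock() are hypothetical names, and the port number and encap_type
 * value are placeholders a real driver would choose for itself.
 */
#if 0
static int foo_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	/* Decapsulation would happen here; returning 0 tells the UDP
	 * stack that the skb has been consumed by the tunnel.
	 */
	kfree_skb(skb);
	return 0;
}

static int foo_open_sock(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg udp_conf = {
		.family			= AF_INET,
		.local_udp_port		= htons(5555),	/* placeholder port */
		.use_udp_checksums	= true,
	};
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.encap_type	= 1,	/* driver-chosen UDP_ENCAP_* value */
		.encap_rcv	= foo_udp_recv,
	};
	int err;

	/* Create and bind the kernel UDP socket in @net ... */
	err = udp_sock_create4(net, &udp_conf, sockp);
	if (err)
		return err;

	/* ... then turn it into an encapsulation socket. */
	setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
	return 0;
}
/* Teardown would pair this with udp_tunnel_sock_release(*sockp). */
#endif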