1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * UDP over IPv6 4 * Linux INET6 implementation 5 * 6 * Authors: 7 * Pedro Roque <roque@di.fc.ul.pt> 8 * 9 * Based on linux/ipv4/udp.c 10 * 11 * Fixes: 12 * Hideaki YOSHIFUJI : sin6_scope_id support 13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 14 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind 15 * a single port at the same time. 16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data 17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file. 18 */ 19 20 #include <linux/errno.h> 21 #include <linux/types.h> 22 #include <linux/socket.h> 23 #include <linux/sockios.h> 24 #include <linux/net.h> 25 #include <linux/in6.h> 26 #include <linux/netdevice.h> 27 #include <linux/if_arp.h> 28 #include <linux/ipv6.h> 29 #include <linux/icmpv6.h> 30 #include <linux/init.h> 31 #include <linux/module.h> 32 #include <linux/skbuff.h> 33 #include <linux/slab.h> 34 #include <linux/uaccess.h> 35 #include <linux/indirect_call_wrapper.h> 36 37 #include <net/addrconf.h> 38 #include <net/ndisc.h> 39 #include <net/protocol.h> 40 #include <net/transp_v6.h> 41 #include <net/ip6_route.h> 42 #include <net/raw.h> 43 #include <net/tcp_states.h> 44 #include <net/ip6_checksum.h> 45 #include <net/ip6_tunnel.h> 46 #include <net/xfrm.h> 47 #include <net/inet_hashtables.h> 48 #include <net/inet6_hashtables.h> 49 #include <net/busy_poll.h> 50 #include <net/sock_reuseport.h> 51 52 #include <linux/proc_fs.h> 53 #include <linux/seq_file.h> 54 #include <trace/events/skb.h> 55 #include "udp_impl.h" 56 57 static u32 udp6_ehashfn(const struct net *net, 58 const struct in6_addr *laddr, 59 const u16 lport, 60 const struct in6_addr *faddr, 61 const __be16 fport) 62 { 63 static u32 udp6_ehash_secret __read_mostly; 64 static u32 udp_ipv6_hash_secret __read_mostly; 65 66 u32 lhash, fhash; 67 68 net_get_random_once(&udp6_ehash_secret, 69 sizeof(udp6_ehash_secret)); 70 net_get_random_once(&udp_ipv6_hash_secret, 71 sizeof(udp_ipv6_hash_secret)); 72 73 lhash = (__force u32)laddr->s6_addr32[3]; 74 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret); 75 76 return __inet6_ehashfn(lhash, lport, fhash, fport, 77 udp_ipv6_hash_secret + net_hash_mix(net)); 78 } 79 80 int udp_v6_get_port(struct sock *sk, unsigned short snum) 81 { 82 unsigned int hash2_nulladdr = 83 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum); 84 unsigned int hash2_partial = 85 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0); 86 87 /* precompute partial secondary hash */ 88 udp_sk(sk)->udp_portaddr_hash = hash2_partial; 89 return udp_lib_get_port(sk, snum, hash2_nulladdr); 90 } 91 92 void udp_v6_rehash(struct sock *sk) 93 { 94 u16 new_hash = ipv6_portaddr_hash(sock_net(sk), 95 &sk->sk_v6_rcv_saddr, 96 inet_sk(sk)->inet_num); 97 98 udp_lib_rehash(sk, new_hash); 99 } 100 101 static int compute_score(struct sock *sk, struct net *net, 102 const struct in6_addr *saddr, __be16 sport, 103 const struct in6_addr *daddr, unsigned short hnum, 104 int dif, int sdif) 105 { 106 int score; 107 struct inet_sock *inet; 108 bool dev_match; 109 110 if (!net_eq(sock_net(sk), net) || 111 udp_sk(sk)->udp_port_hash != hnum || 112 sk->sk_family != PF_INET6) 113 return -1; 114 115 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) 116 return -1; 117 118 score = 0; 119 inet = inet_sk(sk); 120 121 if (inet->inet_dport) { 122 if (inet->inet_dport != sport) 123 return -1; 124 score++; 125 } 126 127 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { 128 if 
(!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) 129 return -1; 130 score++; 131 } 132 133 dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif); 134 if (!dev_match) 135 return -1; 136 score++; 137 138 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 139 score++; 140 141 return score; 142 } 143 144 static struct sock *lookup_reuseport(struct net *net, struct sock *sk, 145 struct sk_buff *skb, 146 const struct in6_addr *saddr, 147 __be16 sport, 148 const struct in6_addr *daddr, 149 unsigned int hnum) 150 { 151 struct sock *reuse_sk = NULL; 152 u32 hash; 153 154 if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { 155 hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); 156 reuse_sk = reuseport_select_sock(sk, hash, skb, 157 sizeof(struct udphdr)); 158 } 159 return reuse_sk; 160 } 161 162 /* called with rcu_read_lock() */ 163 static struct sock *udp6_lib_lookup2(struct net *net, 164 const struct in6_addr *saddr, __be16 sport, 165 const struct in6_addr *daddr, unsigned int hnum, 166 int dif, int sdif, struct udp_hslot *hslot2, 167 struct sk_buff *skb) 168 { 169 struct sock *sk, *result; 170 int score, badness; 171 172 result = NULL; 173 badness = -1; 174 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 175 score = compute_score(sk, net, saddr, sport, 176 daddr, hnum, dif, sdif); 177 if (score > badness) { 178 result = lookup_reuseport(net, sk, skb, 179 saddr, sport, daddr, hnum); 180 /* Fall back to scoring if group has connections */ 181 if (result && !reuseport_has_conns(sk, false)) 182 return result; 183 184 result = result ? : sk; 185 badness = score; 186 } 187 } 188 return result; 189 } 190 191 static inline struct sock *udp6_lookup_run_bpf(struct net *net, 192 struct udp_table *udptable, 193 struct sk_buff *skb, 194 const struct in6_addr *saddr, 195 __be16 sport, 196 const struct in6_addr *daddr, 197 u16 hnum) 198 { 199 struct sock *sk, *reuse_sk; 200 bool no_reuseport; 201 202 if (udptable != &udp_table) 203 return NULL; /* only UDP is supported */ 204 205 no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, 206 saddr, sport, daddr, hnum, &sk); 207 if (no_reuseport || IS_ERR_OR_NULL(sk)) 208 return sk; 209 210 reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); 211 if (reuse_sk) 212 sk = reuse_sk; 213 return sk; 214 } 215 216 /* rcu_read_lock() must be held */ 217 struct sock *__udp6_lib_lookup(struct net *net, 218 const struct in6_addr *saddr, __be16 sport, 219 const struct in6_addr *daddr, __be16 dport, 220 int dif, int sdif, struct udp_table *udptable, 221 struct sk_buff *skb) 222 { 223 unsigned short hnum = ntohs(dport); 224 unsigned int hash2, slot2; 225 struct udp_hslot *hslot2; 226 struct sock *result, *sk; 227 228 hash2 = ipv6_portaddr_hash(net, daddr, hnum); 229 slot2 = hash2 & udptable->mask; 230 hslot2 = &udptable->hash2[slot2]; 231 232 /* Lookup connected or non-wildcard sockets */ 233 result = udp6_lib_lookup2(net, saddr, sport, 234 daddr, hnum, dif, sdif, 235 hslot2, skb); 236 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) 237 goto done; 238 239 /* Lookup redirect from BPF */ 240 if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { 241 sk = udp6_lookup_run_bpf(net, udptable, skb, 242 saddr, sport, daddr, hnum); 243 if (sk) { 244 result = sk; 245 goto done; 246 } 247 } 248 249 /* Got non-wildcard socket or error on first lookup */ 250 if (result) 251 goto done; 252 253 /* Lookup wildcard sockets */ 254 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); 255 slot2 = hash2 & udptable->mask; 256 hslot2 
= &udptable->hash2[slot2]; 257 258 result = udp6_lib_lookup2(net, saddr, sport, 259 &in6addr_any, hnum, dif, sdif, 260 hslot2, skb); 261 done: 262 if (IS_ERR(result)) 263 return NULL; 264 return result; 265 } 266 EXPORT_SYMBOL_GPL(__udp6_lib_lookup); 267 268 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, 269 __be16 sport, __be16 dport, 270 struct udp_table *udptable) 271 { 272 const struct ipv6hdr *iph = ipv6_hdr(skb); 273 274 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, 275 &iph->daddr, dport, inet6_iif(skb), 276 inet6_sdif(skb), udptable, skb); 277 } 278 279 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb, 280 __be16 sport, __be16 dport) 281 { 282 const struct ipv6hdr *iph = ipv6_hdr(skb); 283 284 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, 285 &iph->daddr, dport, inet6_iif(skb), 286 inet6_sdif(skb), &udp_table, NULL); 287 } 288 289 /* Must be called under rcu_read_lock(). 290 * Does increment socket refcount. 291 */ 292 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6) 293 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, 294 const struct in6_addr *daddr, __be16 dport, int dif) 295 { 296 struct sock *sk; 297 298 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport, 299 dif, 0, &udp_table, NULL); 300 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) 301 sk = NULL; 302 return sk; 303 } 304 EXPORT_SYMBOL_GPL(udp6_lib_lookup); 305 #endif 306 307 /* do not use the scratch area len for jumbograms: their length exceeds the 308 * scratch area space; note that the IP6CB flags are still in the first 309 * cacheline, so checking for jumbograms is cheap 310 */ 311 static int udp6_skb_len(struct sk_buff *skb) 312 { 313 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb); 314 } 315 316 /* 317 * This should be easy: if there is something there, we 318 * return it; otherwise we block. 319 */ 320 321 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 322 int noblock, int flags, int *addr_len) 323 { 324 struct ipv6_pinfo *np = inet6_sk(sk); 325 struct inet_sock *inet = inet_sk(sk); 326 struct sk_buff *skb; 327 unsigned int ulen, copied; 328 int off, err, peeking = flags & MSG_PEEK; 329 int is_udplite = IS_UDPLITE(sk); 330 struct udp_mib __percpu *mib; 331 bool checksum_valid = false; 332 int is_udp4; 333 334 if (flags & MSG_ERRQUEUE) 335 return ipv6_recv_error(sk, msg, len, addr_len); 336 337 if (np->rxpmtu && np->rxopt.bits.rxpmtu) 338 return ipv6_recv_rxpmtu(sk, msg, len, addr_len); 339 340 try_again: 341 off = sk_peek_offset(sk, flags); 342 skb = __skb_recv_udp(sk, flags, noblock, &off, &err); 343 if (!skb) 344 return err; 345 346 ulen = udp6_skb_len(skb); 347 copied = len; 348 if (copied > ulen - off) 349 copied = ulen - off; 350 else if (copied < ulen) 351 msg->msg_flags |= MSG_TRUNC; 352 353 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 354 mib = __UDPX_MIB(sk, is_udp4); 355 356 /* 357 * If checksum is needed at all, try to do it while copying the 358 * data. If the data is truncated, or if we only want a partial 359 * coverage checksum (UDP-Lite), do it before the copy.
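 *
 * In the truncated, peeked or partial-coverage cases the checksum is
 * verified up front via __udp_lib_checksum_complete(); otherwise
 * skb_copy_and_csum_datagram_msg() folds verification into the copy and
 * reports a mismatch as -EINVAL, which sends us to csum_copy_err to drop
 * the datagram and retry with the next one.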
360 */ 361 362 if (copied < ulen || peeking || 363 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) { 364 checksum_valid = udp_skb_csum_unnecessary(skb) || 365 !__udp_lib_checksum_complete(skb); 366 if (!checksum_valid) 367 goto csum_copy_err; 368 } 369 370 if (checksum_valid || udp_skb_csum_unnecessary(skb)) { 371 if (udp_skb_is_linear(skb)) 372 err = copy_linear_skb(skb, copied, off, &msg->msg_iter); 373 else 374 err = skb_copy_datagram_msg(skb, off, msg, copied); 375 } else { 376 err = skb_copy_and_csum_datagram_msg(skb, off, msg); 377 if (err == -EINVAL) 378 goto csum_copy_err; 379 } 380 if (unlikely(err)) { 381 if (!peeking) { 382 atomic_inc(&sk->sk_drops); 383 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 384 } 385 kfree_skb(skb); 386 return err; 387 } 388 if (!peeking) 389 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS); 390 391 sock_recv_ts_and_drops(msg, sk, skb); 392 393 /* Copy the address. */ 394 if (msg->msg_name) { 395 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 396 sin6->sin6_family = AF_INET6; 397 sin6->sin6_port = udp_hdr(skb)->source; 398 sin6->sin6_flowinfo = 0; 399 400 if (is_udp4) { 401 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, 402 &sin6->sin6_addr); 403 sin6->sin6_scope_id = 0; 404 } else { 405 sin6->sin6_addr = ipv6_hdr(skb)->saddr; 406 sin6->sin6_scope_id = 407 ipv6_iface_scope_id(&sin6->sin6_addr, 408 inet6_iif(skb)); 409 } 410 *addr_len = sizeof(*sin6); 411 412 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, 413 (struct sockaddr *)sin6); 414 } 415 416 if (udp_sk(sk)->gro_enabled) 417 udp_cmsg_recv(msg, sk, skb); 418 419 if (np->rxopt.all) 420 ip6_datagram_recv_common_ctl(sk, msg, skb); 421 422 if (is_udp4) { 423 if (inet->cmsg_flags) 424 ip_cmsg_recv_offset(msg, sk, skb, 425 sizeof(struct udphdr), off); 426 } else { 427 if (np->rxopt.all) 428 ip6_datagram_recv_specific_ctl(sk, msg, skb); 429 } 430 431 err = copied; 432 if (flags & MSG_TRUNC) 433 err = ulen; 434 435 skb_consume_udp(sk, skb, peeking ? -err : err); 436 return err; 437 438 csum_copy_err: 439 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags, 440 udp_skb_destructor)) { 441 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS); 442 SNMP_INC_STATS(mib, UDP_MIB_INERRORS); 443 } 444 kfree_skb(skb); 445 446 /* starting over for a new packet, but check if we need to yield */ 447 cond_resched(); 448 msg->msg_flags &= ~MSG_TRUNC; 449 goto try_again; 450 } 451 452 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key); 453 void udpv6_encap_enable(void) 454 { 455 static_branch_inc(&udpv6_encap_needed_key); 456 } 457 EXPORT_SYMBOL(udpv6_encap_enable); 458 459 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go 460 * through error handlers in encapsulations looking for a match. 461 */ 462 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, 463 struct inet6_skb_parm *opt, 464 u8 type, u8 code, int offset, __be32 info) 465 { 466 int i; 467 468 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { 469 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 470 u8 type, u8 code, int offset, __be32 info); 471 const struct ip6_tnl_encap_ops *encap; 472 473 encap = rcu_dereference(ip6tun_encaps[i]); 474 if (!encap) 475 continue; 476 handler = encap->err_handler; 477 if (handler && !handler(skb, opt, type, code, offset, info)) 478 return 0; 479 } 480 481 return -ENOENT; 482 } 483 484 /* Try to match ICMP errors to UDP tunnels by looking up a socket without 485 * reversing source and destination port: this will match tunnels that force the 486 * same destination port on both endpoints (e.g. 
VXLAN, GENEVE). Note that 487 * lwtunnels might actually break this assumption by being configured with 488 * different destination ports on endpoints, in this case we won't be able to 489 * trace ICMP messages back to them. 490 * 491 * If this doesn't match any socket, probe tunnels with arbitrary destination 492 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port 493 * we've sent packets to won't necessarily match the local destination port. 494 * 495 * Then ask the tunnel implementation to match the error against a valid 496 * association. 497 * 498 * Return an error if we can't find a match, the socket if we need further 499 * processing, zero otherwise. 500 */ 501 static struct sock *__udp6_lib_err_encap(struct net *net, 502 const struct ipv6hdr *hdr, int offset, 503 struct udphdr *uh, 504 struct udp_table *udptable, 505 struct sk_buff *skb, 506 struct inet6_skb_parm *opt, 507 u8 type, u8 code, __be32 info) 508 { 509 int network_offset, transport_offset; 510 struct sock *sk; 511 512 network_offset = skb_network_offset(skb); 513 transport_offset = skb_transport_offset(skb); 514 515 /* Network header needs to point to the outer IPv6 header inside ICMP */ 516 skb_reset_network_header(skb); 517 518 /* Transport header needs to point to the UDP header */ 519 skb_set_transport_header(skb, offset); 520 521 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source, 522 &hdr->saddr, uh->dest, 523 inet6_iif(skb), 0, udptable, skb); 524 if (sk) { 525 int (*lookup)(struct sock *sk, struct sk_buff *skb); 526 struct udp_sock *up = udp_sk(sk); 527 528 lookup = READ_ONCE(up->encap_err_lookup); 529 if (!lookup || lookup(sk, skb)) 530 sk = NULL; 531 } 532 533 if (!sk) { 534 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code, 535 offset, info)); 536 } 537 538 skb_set_transport_header(skb, transport_offset); 539 skb_set_network_header(skb, network_offset); 540 541 return sk; 542 } 543 544 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 545 u8 type, u8 code, int offset, __be32 info, 546 struct udp_table *udptable) 547 { 548 struct ipv6_pinfo *np; 549 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; 550 const struct in6_addr *saddr = &hdr->saddr; 551 const struct in6_addr *daddr = &hdr->daddr; 552 struct udphdr *uh = (struct udphdr *)(skb->data+offset); 553 bool tunnel = false; 554 struct sock *sk; 555 int harderr; 556 int err; 557 struct net *net = dev_net(skb->dev); 558 559 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, 560 inet6_iif(skb), inet6_sdif(skb), udptable, NULL); 561 if (!sk || udp_sk(sk)->encap_type) { 562 /* No socket for error: try tunnels before discarding */ 563 sk = ERR_PTR(-ENOENT); 564 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 565 sk = __udp6_lib_err_encap(net, hdr, offset, uh, 566 udptable, skb, 567 opt, type, code, info); 568 if (!sk) 569 return 0; 570 } 571 572 if (IS_ERR(sk)) { 573 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), 574 ICMP6_MIB_INERRORS); 575 return PTR_ERR(sk); 576 } 577 578 tunnel = true; 579 } 580 581 harderr = icmpv6_err_convert(type, code, &err); 582 np = inet6_sk(sk); 583 584 if (type == ICMPV6_PKT_TOOBIG) { 585 if (!ip6_sk_accept_pmtu(sk)) 586 goto out; 587 ip6_sk_update_pmtu(skb, sk, info); 588 if (np->pmtudisc != IPV6_PMTUDISC_DONT) 589 harderr = 1; 590 } 591 if (type == NDISC_REDIRECT) { 592 if (tunnel) { 593 ip6_redirect(skb, sock_net(sk), inet6_iif(skb), 594 sk->sk_mark, sk->sk_uid); 595 } else { 596 ip6_sk_redirect(skb, sk); 597 } 598 goto out; 599 } 600 601 /* 
Tunnels don't have an application socket: don't pass errors back */ 602 if (tunnel) 603 goto out; 604 605 if (!np->recverr) { 606 if (!harderr || sk->sk_state != TCP_ESTABLISHED) 607 goto out; 608 } else { 609 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); 610 } 611 612 sk->sk_err = err; 613 sk->sk_error_report(sk); 614 out: 615 return 0; 616 } 617 618 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 619 { 620 int rc; 621 622 if (!ipv6_addr_any(&sk->sk_v6_daddr)) { 623 sock_rps_save_rxhash(sk, skb); 624 sk_mark_napi_id(sk, skb); 625 sk_incoming_cpu_update(sk); 626 } else { 627 sk_mark_napi_id_once(sk, skb); 628 } 629 630 rc = __udp_enqueue_schedule_skb(sk, skb); 631 if (rc < 0) { 632 int is_udplite = IS_UDPLITE(sk); 633 634 /* Note that an ENOMEM error is charged twice */ 635 if (rc == -ENOMEM) 636 UDP6_INC_STATS(sock_net(sk), 637 UDP_MIB_RCVBUFERRORS, is_udplite); 638 else 639 UDP6_INC_STATS(sock_net(sk), 640 UDP_MIB_MEMERRORS, is_udplite); 641 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 642 kfree_skb(skb); 643 return -1; 644 } 645 646 return 0; 647 } 648 649 static __inline__ int udpv6_err(struct sk_buff *skb, 650 struct inet6_skb_parm *opt, u8 type, 651 u8 code, int offset, __be32 info) 652 { 653 return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); 654 } 655 656 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) 657 { 658 struct udp_sock *up = udp_sk(sk); 659 int is_udplite = IS_UDPLITE(sk); 660 661 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 662 goto drop; 663 664 if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) { 665 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 666 667 /* 668 * This is an encapsulation socket so pass the skb to 669 * the socket's udp_encap_rcv() hook. Otherwise, just 670 * fall through and pass this up the UDP socket. 671 * up->encap_rcv() returns the following value: 672 * =0 if skb was successfully passed to the encap 673 * handler or was discarded by it. 674 * >0 if skb should be passed on to UDP. 675 * <0 if skb should be resubmitted as proto -N 676 */ 677 678 /* if we're overly short, let UDP handle it */ 679 encap_rcv = READ_ONCE(up->encap_rcv); 680 if (encap_rcv) { 681 int ret; 682 683 /* Verify checksum before giving to encap */ 684 if (udp_lib_checksum_complete(skb)) 685 goto csum_error; 686 687 ret = encap_rcv(sk, skb); 688 if (ret <= 0) { 689 __UDP_INC_STATS(sock_net(sk), 690 UDP_MIB_INDATAGRAMS, 691 is_udplite); 692 return -ret; 693 } 694 } 695 696 /* FALLTHROUGH -- it's a UDP Packet */ 697 } 698 699 /* 700 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 
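 *
 * pcrlen is the minimum checksum coverage this receiver accepts, as set by
 * the UDPLITE_RECV_CSCOV socket option, e.g. (illustrative user-space
 * sketch):
 *
 *   int cov = 20;
 *   setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *
 * pcrlen == 0 means full coverage was requested, so a packet advertising
 * partial coverage is dropped; otherwise a packet whose coverage (cscov)
 * is smaller than pcrlen is dropped as well.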
701 */ 702 if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { 703 704 if (up->pcrlen == 0) { /* full coverage was set */ 705 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", 706 UDP_SKB_CB(skb)->cscov, skb->len); 707 goto drop; 708 } 709 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { 710 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", 711 UDP_SKB_CB(skb)->cscov, up->pcrlen); 712 goto drop; 713 } 714 } 715 716 prefetch(&sk->sk_rmem_alloc); 717 if (rcu_access_pointer(sk->sk_filter) && 718 udp_lib_checksum_complete(skb)) 719 goto csum_error; 720 721 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) 722 goto drop; 723 724 udp_csum_pull_header(skb); 725 726 skb_dst_drop(skb); 727 728 return __udpv6_queue_rcv_skb(sk, skb); 729 730 csum_error: 731 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 732 drop: 733 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 734 atomic_inc(&sk->sk_drops); 735 kfree_skb(skb); 736 return -1; 737 } 738 739 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 740 { 741 struct sk_buff *next, *segs; 742 int ret; 743 744 if (likely(!udp_unexpected_gso(sk, skb))) 745 return udpv6_queue_rcv_one_skb(sk, skb); 746 747 __skb_push(skb, -skb_mac_offset(skb)); 748 segs = udp_rcv_segment(sk, skb, false); 749 skb_list_walk_safe(segs, skb, next) { 750 __skb_pull(skb, skb_transport_offset(skb)); 751 752 ret = udpv6_queue_rcv_one_skb(sk, skb); 753 if (ret > 0) 754 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret, 755 true); 756 } 757 return 0; 758 } 759 760 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk, 761 __be16 loc_port, const struct in6_addr *loc_addr, 762 __be16 rmt_port, const struct in6_addr *rmt_addr, 763 int dif, int sdif, unsigned short hnum) 764 { 765 struct inet_sock *inet = inet_sk(sk); 766 767 if (!net_eq(sock_net(sk), net)) 768 return false; 769 770 if (udp_sk(sk)->udp_port_hash != hnum || 771 sk->sk_family != PF_INET6 || 772 (inet->inet_dport && inet->inet_dport != rmt_port) || 773 (!ipv6_addr_any(&sk->sk_v6_daddr) && 774 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) || 775 !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) || 776 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && 777 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))) 778 return false; 779 if (!inet6_mc_check(sk, loc_addr, rmt_addr)) 780 return false; 781 return true; 782 } 783 784 static void udp6_csum_zero_error(struct sk_buff *skb) 785 { 786 /* RFC 2460 section 8.1 says that we SHOULD log 787 * this error. Well, it is reasonable. 788 */ 789 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", 790 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), 791 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); 792 } 793 794 /* 795 * Note: called only from the BH handler context, 796 * so we don't need to lock the hashes. 
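 *
 * When the slot for the destination port holds more than ten sockets,
 * delivery switches to the address+port hash (hash2): the daddr-specific
 * slot is scanned first and the in6addr_any slot afterwards. The first
 * matching socket is remembered and receives the original skb at the end;
 * every further match gets its own clone.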
797 */ 798 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 799 const struct in6_addr *saddr, const struct in6_addr *daddr, 800 struct udp_table *udptable, int proto) 801 { 802 struct sock *sk, *first = NULL; 803 const struct udphdr *uh = udp_hdr(skb); 804 unsigned short hnum = ntohs(uh->dest); 805 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum); 806 unsigned int offset = offsetof(typeof(*sk), sk_node); 807 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); 808 int dif = inet6_iif(skb); 809 int sdif = inet6_sdif(skb); 810 struct hlist_node *node; 811 struct sk_buff *nskb; 812 813 if (use_hash2) { 814 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) & 815 udptable->mask; 816 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask; 817 start_lookup: 818 hslot = &udptable->hash2[hash2]; 819 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 820 } 821 822 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { 823 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr, 824 uh->source, saddr, dif, sdif, 825 hnum)) 826 continue; 827 /* If the checksum is zero and no_check is not on for 828 * the socket, then skip it. 829 */ 830 if (!uh->check && !udp_sk(sk)->no_check6_rx) 831 continue; 832 if (!first) { 833 first = sk; 834 continue; 835 } 836 nskb = skb_clone(skb, GFP_ATOMIC); 837 if (unlikely(!nskb)) { 838 atomic_inc(&sk->sk_drops); 839 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS, 840 IS_UDPLITE(sk)); 841 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, 842 IS_UDPLITE(sk)); 843 continue; 844 } 845 846 if (udpv6_queue_rcv_skb(sk, nskb) > 0) 847 consume_skb(nskb); 848 } 849 850 /* Also lookup *:port if we are using hash2 and haven't done so yet. */ 851 if (use_hash2 && hash2 != hash2_any) { 852 hash2 = hash2_any; 853 goto start_lookup; 854 } 855 856 if (first) { 857 if (udpv6_queue_rcv_skb(first, skb) > 0) 858 consume_skb(skb); 859 } else { 860 kfree_skb(skb); 861 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI, 862 proto == IPPROTO_UDPLITE); 863 } 864 return 0; 865 } 866 867 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) 868 { 869 if (udp_sk_rx_dst_set(sk, dst)) { 870 const struct rt6_info *rt = (const struct rt6_info *)dst; 871 872 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); 873 } 874 } 875 876 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and 877 * return code conversion for ip layer consumption 878 */ 879 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, 880 struct udphdr *uh) 881 { 882 int ret; 883 884 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) 885 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo); 886 887 ret = udpv6_queue_rcv_skb(sk, skb); 888 889 /* a return value > 0 means to resubmit the input */ 890 if (ret > 0) 891 return ret; 892 return 0; 893 } 894 895 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 896 int proto) 897 { 898 const struct in6_addr *saddr, *daddr; 899 struct net *net = dev_net(skb->dev); 900 struct udphdr *uh; 901 struct sock *sk; 902 bool refcounted; 903 u32 ulen = 0; 904 905 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 906 goto discard; 907 908 saddr = &ipv6_hdr(skb)->saddr; 909 daddr = &ipv6_hdr(skb)->daddr; 910 uh = udp_hdr(skb); 911 912 ulen = ntohs(uh->len); 913 if (ulen > skb->len) 914 goto short_packet; 915 916 if (proto == IPPROTO_UDP) { 917 /* UDP validates ulen.
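 * A length field of zero indicates a jumbo payload, in which case the
 * datagram length is taken from skb->len; otherwise the skb is trimmed
 * down to ulen and the saddr/daddr/uh pointers are reloaded afterwards,
 * since pskb_trim_rcsum() may have moved the headers.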
*/ 918 919 /* Check for jumbo payload */ 920 if (ulen == 0) 921 ulen = skb->len; 922 923 if (ulen < sizeof(*uh)) 924 goto short_packet; 925 926 if (ulen < skb->len) { 927 if (pskb_trim_rcsum(skb, ulen)) 928 goto short_packet; 929 saddr = &ipv6_hdr(skb)->saddr; 930 daddr = &ipv6_hdr(skb)->daddr; 931 uh = udp_hdr(skb); 932 } 933 } 934 935 if (udp6_csum_init(skb, uh, proto)) 936 goto csum_error; 937 938 /* Check if the socket is already available, e.g. due to early demux */ 939 sk = skb_steal_sock(skb, &refcounted); 940 if (sk) { 941 struct dst_entry *dst = skb_dst(skb); 942 int ret; 943 944 if (unlikely(sk->sk_rx_dst != dst)) 945 udp6_sk_rx_dst_set(sk, dst); 946 947 if (!uh->check && !udp_sk(sk)->no_check6_rx) { 948 if (refcounted) 949 sock_put(sk); 950 goto report_csum_error; 951 } 952 953 ret = udp6_unicast_rcv_skb(sk, skb, uh); 954 if (refcounted) 955 sock_put(sk); 956 return ret; 957 } 958 959 /* 960 * Multicast receive code 961 */ 962 if (ipv6_addr_is_multicast(daddr)) 963 return __udp6_lib_mcast_deliver(net, skb, 964 saddr, daddr, udptable, proto); 965 966 /* Unicast */ 967 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 968 if (sk) { 969 if (!uh->check && !udp_sk(sk)->no_check6_rx) 970 goto report_csum_error; 971 return udp6_unicast_rcv_skb(sk, skb, uh); 972 } 973 974 if (!uh->check) 975 goto report_csum_error; 976 977 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 978 goto discard; 979 980 if (udp_lib_checksum_complete(skb)) 981 goto csum_error; 982 983 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 984 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 985 986 kfree_skb(skb); 987 return 0; 988 989 short_packet: 990 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", 991 proto == IPPROTO_UDPLITE ? 
"-Lite" : "", 992 saddr, ntohs(uh->source), 993 ulen, skb->len, 994 daddr, ntohs(uh->dest)); 995 goto discard; 996 997 report_csum_error: 998 udp6_csum_zero_error(skb); 999 csum_error: 1000 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 1001 discard: 1002 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1003 kfree_skb(skb); 1004 return 0; 1005 } 1006 1007 1008 static struct sock *__udp6_lib_demux_lookup(struct net *net, 1009 __be16 loc_port, const struct in6_addr *loc_addr, 1010 __be16 rmt_port, const struct in6_addr *rmt_addr, 1011 int dif, int sdif) 1012 { 1013 unsigned short hnum = ntohs(loc_port); 1014 unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum); 1015 unsigned int slot2 = hash2 & udp_table.mask; 1016 struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; 1017 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); 1018 struct sock *sk; 1019 1020 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 1021 if (sk->sk_state == TCP_ESTABLISHED && 1022 INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif)) 1023 return sk; 1024 /* Only check first socket in chain */ 1025 break; 1026 } 1027 return NULL; 1028 } 1029 1030 INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) 1031 { 1032 struct net *net = dev_net(skb->dev); 1033 const struct udphdr *uh; 1034 struct sock *sk; 1035 struct dst_entry *dst; 1036 int dif = skb->dev->ifindex; 1037 int sdif = inet6_sdif(skb); 1038 1039 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 1040 sizeof(struct udphdr))) 1041 return; 1042 1043 uh = udp_hdr(skb); 1044 1045 if (skb->pkt_type == PACKET_HOST) 1046 sk = __udp6_lib_demux_lookup(net, uh->dest, 1047 &ipv6_hdr(skb)->daddr, 1048 uh->source, &ipv6_hdr(skb)->saddr, 1049 dif, sdif); 1050 else 1051 return; 1052 1053 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 1054 return; 1055 1056 skb->sk = sk; 1057 skb->destructor = sock_efree; 1058 dst = READ_ONCE(sk->sk_rx_dst); 1059 1060 if (dst) 1061 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); 1062 if (dst) { 1063 /* set noref for now. 1064 * any place which wants to hold dst has to call 1065 * dst_hold_safe() 1066 */ 1067 skb_dst_set_noref(skb, dst); 1068 } 1069 } 1070 1071 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb) 1072 { 1073 return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP); 1074 } 1075 1076 /* 1077 * Throw away all pending data and cancel the corking. Socket is locked. 1078 */ 1079 static void udp_v6_flush_pending_frames(struct sock *sk) 1080 { 1081 struct udp_sock *up = udp_sk(sk); 1082 1083 if (up->pending == AF_INET) 1084 udp_flush_pending_frames(sk); 1085 else if (up->pending) { 1086 up->len = 0; 1087 up->pending = 0; 1088 ip6_flush_pending_frames(sk); 1089 } 1090 } 1091 1092 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, 1093 int addr_len) 1094 { 1095 if (addr_len < offsetofend(struct sockaddr, sa_family)) 1096 return -EINVAL; 1097 /* The following checks are replicated from __ip6_datagram_connect() 1098 * and intended to prevent BPF program called below from accessing 1099 * bytes that are out of the bound specified by user in addr_len. 
1100 */ 1101 if (uaddr->sa_family == AF_INET) { 1102 if (__ipv6_only_sock(sk)) 1103 return -EAFNOSUPPORT; 1104 return udp_pre_connect(sk, uaddr, addr_len); 1105 } 1106 1107 if (addr_len < SIN6_LEN_RFC2133) 1108 return -EINVAL; 1109 1110 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr); 1111 } 1112 1113 /** 1114 * udp6_hwcsum_outgoing - handle outgoing HW checksumming 1115 * @sk: socket we are sending on 1116 * @skb: sk_buff containing the filled-in UDP header 1117 * (checksum field must be zeroed out) 1118 * @saddr: source address 1119 * @daddr: destination address 1120 * @len: length of packet 1121 */ 1122 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, 1123 const struct in6_addr *saddr, 1124 const struct in6_addr *daddr, int len) 1125 { 1126 unsigned int offset; 1127 struct udphdr *uh = udp_hdr(skb); 1128 struct sk_buff *frags = skb_shinfo(skb)->frag_list; 1129 __wsum csum = 0; 1130 1131 if (!frags) { 1132 /* Only one fragment on the socket. */ 1133 skb->csum_start = skb_transport_header(skb) - skb->head; 1134 skb->csum_offset = offsetof(struct udphdr, check); 1135 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0); 1136 } else { 1137 /* 1138 * HW-checksum won't work as there are two or more 1139 * fragments on the socket so that all csums of sk_buffs 1140 * should be together 1141 */ 1142 offset = skb_transport_offset(skb); 1143 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 1144 csum = skb->csum; 1145 1146 skb->ip_summed = CHECKSUM_NONE; 1147 1148 do { 1149 csum = csum_add(csum, frags->csum); 1150 } while ((frags = frags->next)); 1151 1152 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 1153 csum); 1154 if (uh->check == 0) 1155 uh->check = CSUM_MANGLED_0; 1156 } 1157 } 1158 1159 /* 1160 * Sending 1161 */ 1162 1163 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, 1164 struct inet_cork *cork) 1165 { 1166 struct sock *sk = skb->sk; 1167 struct udphdr *uh; 1168 int err = 0; 1169 int is_udplite = IS_UDPLITE(sk); 1170 __wsum csum = 0; 1171 int offset = skb_transport_offset(skb); 1172 int len = skb->len - offset; 1173 int datalen = len - sizeof(*uh); 1174 1175 /* 1176 * Create a UDP header 1177 */ 1178 uh = udp_hdr(skb); 1179 uh->source = fl6->fl6_sport; 1180 uh->dest = fl6->fl6_dport; 1181 uh->len = htons(len); 1182 uh->check = 0; 1183 1184 if (cork->gso_size) { 1185 const int hlen = skb_network_header_len(skb) + 1186 sizeof(struct udphdr); 1187 1188 if (hlen + cork->gso_size > cork->fragsize) { 1189 kfree_skb(skb); 1190 return -EINVAL; 1191 } 1192 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { 1193 kfree_skb(skb); 1194 return -EINVAL; 1195 } 1196 if (udp_sk(sk)->no_check6_tx) { 1197 kfree_skb(skb); 1198 return -EINVAL; 1199 } 1200 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 1201 dst_xfrm(skb_dst(skb))) { 1202 kfree_skb(skb); 1203 return -EIO; 1204 } 1205 1206 if (datalen > cork->gso_size) { 1207 skb_shinfo(skb)->gso_size = cork->gso_size; 1208 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 1209 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen, 1210 cork->gso_size); 1211 } 1212 goto csum_partial; 1213 } 1214 1215 if (is_udplite) 1216 csum = udplite_csum(skb); 1217 else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */ 1218 skb->ip_summed = CHECKSUM_NONE; 1219 goto send; 1220 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 1221 csum_partial: 1222 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len); 1223 goto send; 1224 } else 1225 csum = udp_csum(skb); 
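/* At this point csum holds a software-computed checksum (UDP-Lite, or plain
 * UDP without hardware offload); the no_check6_tx and CHECKSUM_PARTIAL cases
 * have already branched to send/csum_partial above, so only the pseudo-header
 * fold-in below remains.
 */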
1226 1227 /* add protocol-dependent pseudo-header */ 1228 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, 1229 len, fl6->flowi6_proto, csum); 1230 if (uh->check == 0) 1231 uh->check = CSUM_MANGLED_0; 1232 1233 send: 1234 err = ip6_send_skb(skb); 1235 if (err) { 1236 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { 1237 UDP6_INC_STATS(sock_net(sk), 1238 UDP_MIB_SNDBUFERRORS, is_udplite); 1239 err = 0; 1240 } 1241 } else { 1242 UDP6_INC_STATS(sock_net(sk), 1243 UDP_MIB_OUTDATAGRAMS, is_udplite); 1244 } 1245 return err; 1246 } 1247 1248 static int udp_v6_push_pending_frames(struct sock *sk) 1249 { 1250 struct sk_buff *skb; 1251 struct udp_sock *up = udp_sk(sk); 1252 struct flowi6 fl6; 1253 int err = 0; 1254 1255 if (up->pending == AF_INET) 1256 return udp_push_pending_frames(sk); 1257 1258 /* ip6_finish_skb will release the cork, so make a copy of 1259 * fl6 here. 1260 */ 1261 fl6 = inet_sk(sk)->cork.fl.u.ip6; 1262 1263 skb = ip6_finish_skb(sk); 1264 if (!skb) 1265 goto out; 1266 1267 err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base); 1268 1269 out: 1270 up->len = 0; 1271 up->pending = 0; 1272 return err; 1273 } 1274 1275 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 1276 { 1277 struct ipv6_txoptions opt_space; 1278 struct udp_sock *up = udp_sk(sk); 1279 struct inet_sock *inet = inet_sk(sk); 1280 struct ipv6_pinfo *np = inet6_sk(sk); 1281 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 1282 struct in6_addr *daddr, *final_p, final; 1283 struct ipv6_txoptions *opt = NULL; 1284 struct ipv6_txoptions *opt_to_free = NULL; 1285 struct ip6_flowlabel *flowlabel = NULL; 1286 struct flowi6 fl6; 1287 struct dst_entry *dst; 1288 struct ipcm6_cookie ipc6; 1289 int addr_len = msg->msg_namelen; 1290 bool connected = false; 1291 int ulen = len; 1292 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 1293 int err; 1294 int is_udplite = IS_UDPLITE(sk); 1295 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 1296 1297 ipcm6_init(&ipc6); 1298 ipc6.gso_size = up->gso_size; 1299 ipc6.sockc.tsflags = sk->sk_tsflags; 1300 ipc6.sockc.mark = sk->sk_mark; 1301 1302 /* destination address check */ 1303 if (sin6) { 1304 if (addr_len < offsetof(struct sockaddr, sa_data)) 1305 return -EINVAL; 1306 1307 switch (sin6->sin6_family) { 1308 case AF_INET6: 1309 if (addr_len < SIN6_LEN_RFC2133) 1310 return -EINVAL; 1311 daddr = &sin6->sin6_addr; 1312 if (ipv6_addr_any(daddr) && 1313 ipv6_addr_v4mapped(&np->saddr)) 1314 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), 1315 daddr); 1316 break; 1317 case AF_INET: 1318 goto do_udp_sendmsg; 1319 case AF_UNSPEC: 1320 msg->msg_name = sin6 = NULL; 1321 msg->msg_namelen = addr_len = 0; 1322 daddr = NULL; 1323 break; 1324 default: 1325 return -EINVAL; 1326 } 1327 } else if (!up->pending) { 1328 if (sk->sk_state != TCP_ESTABLISHED) 1329 return -EDESTADDRREQ; 1330 daddr = &sk->sk_v6_daddr; 1331 } else 1332 daddr = NULL; 1333 1334 if (daddr) { 1335 if (ipv6_addr_v4mapped(daddr)) { 1336 struct sockaddr_in sin; 1337 sin.sin_family = AF_INET; 1338 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport; 1339 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 1340 msg->msg_name = &sin; 1341 msg->msg_namelen = sizeof(sin); 1342 do_udp_sendmsg: 1343 if (__ipv6_only_sock(sk)) 1344 return -ENETUNREACH; 1345 return udp_sendmsg(sk, msg, len); 1346 } 1347 } 1348 1349 if (up->pending == AF_INET) 1350 return udp_sendmsg(sk, msg, len); 1351 1352 /* Rough check on arithmetic overflow, 1353 better check is made in ip6_append_data(). 
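   For example, with len close to INT_MAX the later "ulen += sizeof(struct udphdr)"
   would wrap, so anything above INT_MAX - sizeof(struct udphdr) is rejected with
   -EMSGSIZE up front.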
1354 */ 1355 if (len > INT_MAX - sizeof(struct udphdr)) 1356 return -EMSGSIZE; 1357 1358 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 1359 if (up->pending) { 1360 /* 1361 * There are pending frames. 1362 * The socket lock must be held while it's corked. 1363 */ 1364 lock_sock(sk); 1365 if (likely(up->pending)) { 1366 if (unlikely(up->pending != AF_INET6)) { 1367 release_sock(sk); 1368 return -EAFNOSUPPORT; 1369 } 1370 dst = NULL; 1371 goto do_append_data; 1372 } 1373 release_sock(sk); 1374 } 1375 ulen += sizeof(struct udphdr); 1376 1377 memset(&fl6, 0, sizeof(fl6)); 1378 1379 if (sin6) { 1380 if (sin6->sin6_port == 0) 1381 return -EINVAL; 1382 1383 fl6.fl6_dport = sin6->sin6_port; 1384 daddr = &sin6->sin6_addr; 1385 1386 if (np->sndflow) { 1387 fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; 1388 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { 1389 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 1390 if (IS_ERR(flowlabel)) 1391 return -EINVAL; 1392 } 1393 } 1394 1395 /* 1396 * Otherwise it will be difficult to maintain 1397 * sk->sk_dst_cache. 1398 */ 1399 if (sk->sk_state == TCP_ESTABLISHED && 1400 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) 1401 daddr = &sk->sk_v6_daddr; 1402 1403 if (addr_len >= sizeof(struct sockaddr_in6) && 1404 sin6->sin6_scope_id && 1405 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) 1406 fl6.flowi6_oif = sin6->sin6_scope_id; 1407 } else { 1408 if (sk->sk_state != TCP_ESTABLISHED) 1409 return -EDESTADDRREQ; 1410 1411 fl6.fl6_dport = inet->inet_dport; 1412 daddr = &sk->sk_v6_daddr; 1413 fl6.flowlabel = np->flow_label; 1414 connected = true; 1415 } 1416 1417 if (!fl6.flowi6_oif) 1418 fl6.flowi6_oif = sk->sk_bound_dev_if; 1419 1420 if (!fl6.flowi6_oif) 1421 fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 1422 1423 fl6.flowi6_mark = ipc6.sockc.mark; 1424 fl6.flowi6_uid = sk->sk_uid; 1425 1426 if (msg->msg_controllen) { 1427 opt = &opt_space; 1428 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1429 opt->tot_len = sizeof(*opt); 1430 ipc6.opt = opt; 1431 1432 err = udp_cmsg_send(sk, msg, &ipc6.gso_size); 1433 if (err > 0) 1434 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, 1435 &ipc6); 1436 if (err < 0) { 1437 fl6_sock_release(flowlabel); 1438 return err; 1439 } 1440 if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { 1441 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 1442 if (IS_ERR(flowlabel)) 1443 return -EINVAL; 1444 } 1445 if (!(opt->opt_nflen|opt->opt_flen)) 1446 opt = NULL; 1447 connected = false; 1448 } 1449 if (!opt) { 1450 opt = txopt_get(np); 1451 opt_to_free = opt; 1452 } 1453 if (flowlabel) 1454 opt = fl6_merge_options(&opt_space, flowlabel, opt); 1455 opt = ipv6_fixup_options(&opt_space, opt); 1456 ipc6.opt = opt; 1457 1458 fl6.flowi6_proto = sk->sk_protocol; 1459 fl6.daddr = *daddr; 1460 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) 1461 fl6.saddr = np->saddr; 1462 fl6.fl6_sport = inet->inet_sport; 1463 1464 if (cgroup_bpf_enabled(BPF_CGROUP_UDP6_SENDMSG) && !connected) { 1465 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, 1466 (struct sockaddr *)sin6, &fl6.saddr); 1467 if (err) 1468 goto out_no_dst; 1469 if (sin6) { 1470 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { 1471 /* BPF program rewrote IPv6-only by IPv4-mapped 1472 * IPv6. It's currently unsupported. 1473 */ 1474 err = -ENOTSUPP; 1475 goto out_no_dst; 1476 } 1477 if (sin6->sin6_port == 0) { 1478 /* BPF program set invalid port. Reject it. 
*/ 1479 err = -EINVAL; 1480 goto out_no_dst; 1481 } 1482 fl6.fl6_dport = sin6->sin6_port; 1483 fl6.daddr = sin6->sin6_addr; 1484 } 1485 } 1486 1487 if (ipv6_addr_any(&fl6.daddr)) 1488 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ 1489 1490 final_p = fl6_update_dst(&fl6, opt, &final); 1491 if (final_p) 1492 connected = false; 1493 1494 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) { 1495 fl6.flowi6_oif = np->mcast_oif; 1496 connected = false; 1497 } else if (!fl6.flowi6_oif) 1498 fl6.flowi6_oif = np->ucast_oif; 1499 1500 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 1501 1502 if (ipc6.tclass < 0) 1503 ipc6.tclass = np->tclass; 1504 1505 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 1506 1507 dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected); 1508 if (IS_ERR(dst)) { 1509 err = PTR_ERR(dst); 1510 dst = NULL; 1511 goto out; 1512 } 1513 1514 if (ipc6.hlimit < 0) 1515 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); 1516 1517 if (msg->msg_flags&MSG_CONFIRM) 1518 goto do_confirm; 1519 back_from_confirm: 1520 1521 /* Lockless fast path for the non-corking case */ 1522 if (!corkreq) { 1523 struct inet_cork_full cork; 1524 struct sk_buff *skb; 1525 1526 skb = ip6_make_skb(sk, getfrag, msg, ulen, 1527 sizeof(struct udphdr), &ipc6, 1528 &fl6, (struct rt6_info *)dst, 1529 msg->msg_flags, &cork); 1530 err = PTR_ERR(skb); 1531 if (!IS_ERR_OR_NULL(skb)) 1532 err = udp_v6_send_skb(skb, &fl6, &cork.base); 1533 goto out; 1534 } 1535 1536 lock_sock(sk); 1537 if (unlikely(up->pending)) { 1538 /* The socket is already corked while preparing it. */ 1539 /* ... which is an evident application bug. --ANK */ 1540 release_sock(sk); 1541 1542 net_dbg_ratelimited("udp cork app bug 2\n"); 1543 err = -EINVAL; 1544 goto out; 1545 } 1546 1547 up->pending = AF_INET6; 1548 1549 do_append_data: 1550 if (ipc6.dontfrag < 0) 1551 ipc6.dontfrag = np->dontfrag; 1552 up->len += ulen; 1553 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr), 1554 &ipc6, &fl6, (struct rt6_info *)dst, 1555 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 1556 if (err) 1557 udp_v6_flush_pending_frames(sk); 1558 else if (!corkreq) 1559 err = udp_v6_push_pending_frames(sk); 1560 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) 1561 up->pending = 0; 1562 1563 if (err > 0) 1564 err = np->recverr ? net_xmit_errno(err) : 0; 1565 release_sock(sk); 1566 1567 out: 1568 dst_release(dst); 1569 out_no_dst: 1570 fl6_sock_release(flowlabel); 1571 txopt_put(opt_to_free); 1572 if (!err) 1573 return len; 1574 /* 1575 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting 1576 * ENOBUFS might not be good (it's not tunable per se), but otherwise 1577 * we don't have a good statistic (IpOutDiscards but it can be too many 1578 * things). We could add another new stat but at least for now that 1579 * seems like overkill. 
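 * Hence the compromise below: count the failed send as a
 * UDP_MIB_SNDBUFERRORS only when it looks like local buffer exhaustion,
 * i.e. -ENOBUFS or SOCK_NOSPACE set on the socket.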
1580 */ 1581 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 1582 UDP6_INC_STATS(sock_net(sk), 1583 UDP_MIB_SNDBUFERRORS, is_udplite); 1584 } 1585 return err; 1586 1587 do_confirm: 1588 if (msg->msg_flags & MSG_PROBE) 1589 dst_confirm_neigh(dst, &fl6.daddr); 1590 if (!(msg->msg_flags&MSG_PROBE) || len) 1591 goto back_from_confirm; 1592 err = 0; 1593 goto out; 1594 } 1595 1596 void udpv6_destroy_sock(struct sock *sk) 1597 { 1598 struct udp_sock *up = udp_sk(sk); 1599 lock_sock(sk); 1600 udp_v6_flush_pending_frames(sk); 1601 release_sock(sk); 1602 1603 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 1604 if (up->encap_type) { 1605 void (*encap_destroy)(struct sock *sk); 1606 encap_destroy = READ_ONCE(up->encap_destroy); 1607 if (encap_destroy) 1608 encap_destroy(sk); 1609 } 1610 if (up->encap_enabled) { 1611 static_branch_dec(&udpv6_encap_needed_key); 1612 udp_encap_disable(); 1613 } 1614 } 1615 1616 inet6_destroy_sock(sk); 1617 } 1618 1619 /* 1620 * Socket option code for UDP 1621 */ 1622 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 1623 unsigned int optlen) 1624 { 1625 if (level == SOL_UDP || level == SOL_UDPLITE) 1626 return udp_lib_setsockopt(sk, level, optname, 1627 optval, optlen, 1628 udp_v6_push_pending_frames); 1629 return ipv6_setsockopt(sk, level, optname, optval, optlen); 1630 } 1631 1632 int udpv6_getsockopt(struct sock *sk, int level, int optname, 1633 char __user *optval, int __user *optlen) 1634 { 1635 if (level == SOL_UDP || level == SOL_UDPLITE) 1636 return udp_lib_getsockopt(sk, level, optname, optval, optlen); 1637 return ipv6_getsockopt(sk, level, optname, optval, optlen); 1638 } 1639 1640 /* thinking of making this const? Don't. 1641 * early_demux can change based on sysctl. 
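 * (The early_demux hook is flipped at run time, presumably via the
 *  udp_early_demux sysctl, so this struct has to stay writable.)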
1642 */ 1643 static struct inet6_protocol udpv6_protocol = { 1644 .early_demux = udp_v6_early_demux, 1645 .early_demux_handler = udp_v6_early_demux, 1646 .handler = udpv6_rcv, 1647 .err_handler = udpv6_err, 1648 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1649 }; 1650 1651 /* ------------------------------------------------------------------------ */ 1652 #ifdef CONFIG_PROC_FS 1653 int udp6_seq_show(struct seq_file *seq, void *v) 1654 { 1655 if (v == SEQ_START_TOKEN) { 1656 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); 1657 } else { 1658 int bucket = ((struct udp_iter_state *)seq->private)->bucket; 1659 struct inet_sock *inet = inet_sk(v); 1660 __u16 srcp = ntohs(inet->inet_sport); 1661 __u16 destp = ntohs(inet->inet_dport); 1662 __ip6_dgram_sock_seq_show(seq, v, srcp, destp, 1663 udp_rqueue_get(v), bucket); 1664 } 1665 return 0; 1666 } 1667 1668 const struct seq_operations udp6_seq_ops = { 1669 .start = udp_seq_start, 1670 .next = udp_seq_next, 1671 .stop = udp_seq_stop, 1672 .show = udp6_seq_show, 1673 }; 1674 EXPORT_SYMBOL(udp6_seq_ops); 1675 1676 static struct udp_seq_afinfo udp6_seq_afinfo = { 1677 .family = AF_INET6, 1678 .udp_table = &udp_table, 1679 }; 1680 1681 int __net_init udp6_proc_init(struct net *net) 1682 { 1683 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops, 1684 sizeof(struct udp_iter_state), &udp6_seq_afinfo)) 1685 return -ENOMEM; 1686 return 0; 1687 } 1688 1689 void udp6_proc_exit(struct net *net) 1690 { 1691 remove_proc_entry("udp6", net->proc_net); 1692 } 1693 #endif /* CONFIG_PROC_FS */ 1694 1695 /* ------------------------------------------------------------------------ */ 1696 1697 struct proto udpv6_prot = { 1698 .name = "UDPv6", 1699 .owner = THIS_MODULE, 1700 .close = udp_lib_close, 1701 .pre_connect = udpv6_pre_connect, 1702 .connect = ip6_datagram_connect, 1703 .disconnect = udp_disconnect, 1704 .ioctl = udp_ioctl, 1705 .init = udp_init_sock, 1706 .destroy = udpv6_destroy_sock, 1707 .setsockopt = udpv6_setsockopt, 1708 .getsockopt = udpv6_getsockopt, 1709 .sendmsg = udpv6_sendmsg, 1710 .recvmsg = udpv6_recvmsg, 1711 .release_cb = ip6_datagram_release_cb, 1712 .hash = udp_lib_hash, 1713 .unhash = udp_lib_unhash, 1714 .rehash = udp_v6_rehash, 1715 .get_port = udp_v6_get_port, 1716 .memory_allocated = &udp_memory_allocated, 1717 .sysctl_mem = sysctl_udp_mem, 1718 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), 1719 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), 1720 .obj_size = sizeof(struct udp6_sock), 1721 .h.udp_table = &udp_table, 1722 .diag_destroy = udp_abort, 1723 }; 1724 1725 static struct inet_protosw udpv6_protosw = { 1726 .type = SOCK_DGRAM, 1727 .protocol = IPPROTO_UDP, 1728 .prot = &udpv6_prot, 1729 .ops = &inet6_dgram_ops, 1730 .flags = INET_PROTOSW_PERMANENT, 1731 }; 1732 1733 int __init udpv6_init(void) 1734 { 1735 int ret; 1736 1737 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP); 1738 if (ret) 1739 goto out; 1740 1741 ret = inet6_register_protosw(&udpv6_protosw); 1742 if (ret) 1743 goto out_udpv6_protocol; 1744 out: 1745 return ret; 1746 1747 out_udpv6_protocol: 1748 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1749 goto out; 1750 } 1751 1752 void udpv6_exit(void) 1753 { 1754 inet6_unregister_protosw(&udpv6_protosw); 1755 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); 1756 } 1757