// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}

static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum, dif);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbogram: their length exceeds the
 * scratch area space; note that the IP6CB flags is still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb_reason(skb, drop_reason);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb_reason(skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

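/* Core receive path shared by UDPv6 and UDP-Litev6: validate the UDP length
 * and checksum, then hand the skb to the early-demuxed socket if one was
 * stolen, to the multicast delivery path, or to a looked-up unicast socket.
 * With no matching socket, the packet is dropped and an ICMPv6 port
 * unreachable is sent.
 */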
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

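/* sendmsg() entry point for UDPv6 sockets. IPv4-mapped destinations are
 * redirected to udp_sendmsg(). Uncorked sends take the lockless
 * ip6_make_skb() fast path; corked sends (UDP_CORK/MSG_MORE) append to the
 * pending frames under the socket lock.
 */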
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	/* Rough check on arithmetic overflow,
	 * better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		if (up->pending == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

static const struct inet6_protocol udpv6_protocol = {
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}