// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>
#include <trace/events/udp.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

static int compute_score(struct sock *sk, const struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(const struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	bool need_rescore;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		need_rescore = false;
rescore:
		score = compute_score(need_rescore ? result : sk, net, saddr,
				      sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (need_rescore)
				continue;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			/* compute_score is too long of a function to be
			 * inlined, and calling it again here yields
			 * measurable overhead for some
			 * workloads. Work around it by jumping
			 * backwards to rescore 'result'.
			 */
			need_rescore = true;
			goto rescore;
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(const struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbogram: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data + offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!inet6_test_bit(RECVERR6, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
		sk_skb_reason_drop(sk, skb, drop_reason);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		if (pcrlen == 0) {	/* full coverage was set */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_get_no_check6_rx(sk))
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst))
		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct sock *sk = NULL;
	struct udphdr *uh;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_get_no_check6_rx(sk)) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 * Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_get_no_check6_rx(sk))
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	sk_skb_reason_drop(sk, skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	sk_skb_reason_drop(sk, skb, reason);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk)
		return;

	skb->sk = sk;
	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
	skb->destructor = sock_pfree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}

/**
 * udp6_hwcsum_outgoing - handle outgoing HW checksumming
 * @sk: socket we are sending on
 * @skb: sk_buff containing the filled-in UDP header
 *	 (checksum field must be zeroed out)
 * @saddr: source address
 * @daddr: destination address
 * @len: length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_get_no_check6_tx(sk)) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (is_udplite || dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_get_no_check6_tx(sk)) {	/* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {	/* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}
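/*
 * Sendmsg for UDPv6 sockets. IPv4-mapped destinations are handed off to
 * udp_sendmsg(); otherwise the flow is built, the route resolved, and the
 * data is either corked via ip6_append_data() or sent through the lockless
 * fast path built around ip6_make_skb() when no corking is requested.
 */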
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!READ_ONCE(up->pending)) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow,
	 * better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (READ_ONCE(up->pending)) {
		if (READ_ONCE(up->pending) == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (inet6_test_bit(SNDFLOW, sk)) {
			fl6->flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0) {
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
			connected = false;
		}
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &addr_len,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   dst_rt6_info(dst),
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	WRITE_ONCE(up->pending, AF_INET6);

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, dst_rt6_info(dst),
			      corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);

	if (err > 0)
		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things). We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);

static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}


/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);

		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start = udp_seq_start,
	.next = udp_seq_next,
	.stop = udp_seq_stop,
	.show = udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family = AF_INET6,
	.udp_table = NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
				  sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name = "UDPv6",
	.owner = THIS_MODULE,
	.close = udp_lib_close,
	.pre_connect = udpv6_pre_connect,
	.connect = ip6_datagram_connect,
	.disconnect = udp_disconnect,
	.ioctl = udp_ioctl,
	.init = udpv6_init_sock,
	.destroy = udpv6_destroy_sock,
	.setsockopt = udpv6_setsockopt,
	.getsockopt = udpv6_getsockopt,
	.sendmsg = udpv6_sendmsg,
	.recvmsg = udpv6_recvmsg,
	.splice_eof = udpv6_splice_eof,
	.release_cb = ip6_datagram_release_cb,
	.hash = udp_lib_hash,
	.unhash = udp_lib_unhash,
	.rehash = udp_v6_rehash,
	.get_port = udp_v6_get_port,
	.put_port = udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot = udp_bpf_update_proto,
#endif

	.memory_allocated = &udp_memory_allocated,
	.per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem = sysctl_udp_mem,
	.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size = sizeof(struct udp6_sock),
	.ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
	.h.udp_table = NULL,
	.diag_destroy = udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type = SOCK_DGRAM,
	.protocol = IPPROTO_UDP,
	.prot = &udpv6_prot,
	.ops = &inet6_dgram_ops,
	.flags = INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
		.handler = udpv6_rcv,
		.err_handler = udpv6_err,
		.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
	};
	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}