// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

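/*
 * Score a candidate socket for an incoming packet.  A return value of -1
 * means the socket cannot receive the packet at all (wrong netns, port,
 * family, bound address or device); otherwise the score grows by one for
 * every additional attribute that matches (connected peer port, connected
 * peer address, bound device, receiving CPU), so a more specific socket
 * wins the lookup.
 */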
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			badness = compute_score(sk, net, saddr, sport,
						daddr, hnum, dif, sdif);
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

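/*
 * Convenience wrappers around __udp6_lib_lookup() that pull the addresses
 * out of the skb's IPv6 header; udp6_lib_lookup_skb() runs on the GRO path
 * and therefore uses the network header offset recorded by GRO.
 */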
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags is still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

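/*
 * ICMPv6 error handler for UDP: map the ICMPv6 type/code to an errno, handle
 * PMTU updates and redirects, and deliver the error to the owning socket (or
 * to the matching tunnel's encap_err_rcv handler when no user socket exists).
 */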
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!inet6_test_bit(RECVERR6, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb_reason(skb, drop_reason);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

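/*
 * Receive one skb on a UDPv6 socket: run XFRM policy and socket filter
 * checks, hand encapsulated packets to the socket's encap_rcv hook, apply
 * the UDP-Lite coverage rules, verify the checksum when required and queue
 * the packet on the socket receive queue.
 */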
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		if (pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb_reason(skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

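/*
 * Check whether a socket in the hash slot should receive a copy of a
 * multicast datagram: netns, port, family, peer address/port, bound device
 * and local address must all be compatible, and the socket must have joined
 * the destination multicast group (inet6_mc_check()).
 */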
static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_get_no_check6_rx(sk))
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_get_no_check6_rx(sk)) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_get_no_check6_rx(sk))
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

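/*
 * Early demux: before routing, try to match the packet to an established
 * UDPv6 socket and reuse its cached rx dst, which avoids a route lookup on
 * the fast path for connected sockets.
 */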
void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk)
		return;

	skb->sk = sk;
	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
	skb->destructor = sock_pfree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket.  */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_get_no_check6_tx(sk)) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}

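/*
 * Transmit path: resolve the destination (including IPv4-mapped addresses,
 * flow labels and cmsg options), look up a route, then either build and send
 * a single skb on the uncorked fast path or append data to the corked socket
 * and flush it when the cork is released.
 */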
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!READ_ONCE(up->pending)) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
	if (READ_ONCE(up->pending)) {
		if (READ_ONCE(up->pending) == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (inet6_test_bit(SNDFLOW, sk)) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0) {
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
			connected = false;
		}
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &addr_len,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	WRITE_ONCE(up->pending, AF_INET6);

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);

	if (err > 0)
		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);

static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}


/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */
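/*
 * Glue to the socket layer: udpv6_prot provides the per-protocol callbacks
 * for AF_INET6 datagram sockets, udpv6_protosw registers the
 * SOCK_DGRAM/IPPROTO_UDP socket type, and udpv6_init()/udpv6_exit() hook the
 * protocol handler into IPv6 input at boot and remove it again on exit.
 */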

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct udp6_sock, inet6),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
		.handler     = udpv6_rcv,
		.err_handler = udpv6_err,
		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
	};
	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}