// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}
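/* compute_score() ranks candidate sockets for an incoming packet. A socket
 * must match netns, local port hash, family and bound local address to be
 * considered at all; each additional exact match (connected peer port,
 * connected peer address, bound device, receiving CPU) is worth one point,
 * so the most specific socket wins in udp6_lib_lookup2().
 */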
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}
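/* udp6_lookup_run_bpf() lets a BPF sk_lookup program steer the packet to a
 * socket of its choosing. Only the regular UDP table is eligible; if the
 * program picks a reuseport socket, the reuseport group is consulted once
 * more so that its own selection logic still applies.
 */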
static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != net->ipv4.udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}
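/* __udp6_lib_lookup() below proceeds in up to three stages: connected or
 * non-wildcard sockets in the {addr, port} hash first, then a BPF
 * sk_lookup redirect, and finally wildcard (bound to ::) sockets. A
 * connected (TCP_ESTABLISHED) match short-circuits the BPF stage.
 */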
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum, dif);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */
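/* Receive path notes: the checksum is verified while copying whenever the
 * copy is partial (truncation or MSG_PEEK) or UDP-Lite partial coverage is
 * in effect; otherwise copy and checksum verification are folded into a
 * single pass. On checksum failure the packet is dropped and we retry.
 */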
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}
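/* ICMPv6 error handler for UDP: look up the socket that sent the offending
 * packet, update PMTU or redirect state as appropriate, and report the
 * error to the application if it asked for it (IPV6_RECVERR) or if the
 * error is hard and the socket is connected. Tunnel sockets instead get
 * the raw error through their encap_err_rcv() hook.
 */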
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb_reason(skb, drop_reason);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}
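/* udpv6_queue_rcv_one_skb() performs the per-packet receive checks: XFRM
 * policy, encapsulation dispatch, UDP-Lite coverage validation and, when a
 * socket filter is attached, full checksum verification, before queueing
 * the skb on the socket receive queue.
 */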
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb_reason(skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}
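/* A multicast candidate must match netns, port hash, family, (optional)
 * connected peer address/port, bound device and (optional) bound local
 * address, and must have actually joined the destination group, which
 * inet6_mc_check() verifies.
 */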
static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
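/* Multicast delivery clones the skb once per matching socket and hands the
 * original to the last match. When the primary hash slot is crowded (more
 * than ten entries), the walk switches to the {addr, port} hash instead,
 * scanning the group-address slot first and the wildcard slot second.
 */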
/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}
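/* Main IPv6 UDP receive routine: validate the header and length (trimming
 * oversized skbs), initialise checksum state, then deliver either to an
 * early-demuxed socket stolen from the skb, to the multicast path for
 * group destinations, or via socket lookup. With no listener, the checksum
 * is verified before an ICMPv6 port-unreachable is generated.
 */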
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
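/* Early demux only matches a connected (TCP_ESTABLISHED) socket and only
 * examines the first entry of the secondary hash chain; longer chains fall
 * back to the full lookup done later in __udp6_lib_rcv().
 */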
void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent the BPF program called below from accessing
	 * bytes that lie outside the bound specified by the user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}
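/* For a single linear skb the UDP checksum can be offloaded: the check
 * field is seeded with the complemented IPv6 pseudo-header sum and the
 * device finishes the job. With a frag_list the fragment sums must be
 * folded in software, and a resulting zero is written as CSUM_MANGLED_0
 * (0xffff), since an all-zero checksum means "none" on the wire and is
 * invalid for IPv6 UDP.
 */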
/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksumming won't work with two or more fragments
		 * on the socket: the checksums of all fragments have to
		 * be summed together in software.
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */
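/* udp_v6_send_skb() below also implements UDP segmentation offload
 * (UDP_SEGMENT): when cork->gso_size is set, the single oversized payload
 * is later split into gso_size chunks. This path requires CHECKSUM_PARTIAL
 * and is refused for UDP-Lite, no_check6_tx sockets and xfrm-transformed
 * routes. A minimal userspace sketch (sizes purely illustrative):
 *
 *	int gso = 1200;
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(fd, buf, 12000, 0);	// ten 1200-byte UDP datagrams
 */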
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {	/* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow,
	 * better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		if (up->pending == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);
1427 */ 1428 lock_sock(sk); 1429 if (likely(up->pending)) { 1430 if (unlikely(up->pending != AF_INET6)) { 1431 release_sock(sk); 1432 return -EAFNOSUPPORT; 1433 } 1434 dst = NULL; 1435 goto do_append_data; 1436 } 1437 release_sock(sk); 1438 } 1439 ulen += sizeof(struct udphdr); 1440 1441 memset(fl6, 0, sizeof(*fl6)); 1442 1443 if (sin6) { 1444 if (sin6->sin6_port == 0) 1445 return -EINVAL; 1446 1447 fl6->fl6_dport = sin6->sin6_port; 1448 daddr = &sin6->sin6_addr; 1449 1450 if (np->sndflow) { 1451 fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; 1452 if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) { 1453 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); 1454 if (IS_ERR(flowlabel)) 1455 return -EINVAL; 1456 } 1457 } 1458 1459 /* 1460 * Otherwise it will be difficult to maintain 1461 * sk->sk_dst_cache. 1462 */ 1463 if (sk->sk_state == TCP_ESTABLISHED && 1464 ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) 1465 daddr = &sk->sk_v6_daddr; 1466 1467 if (addr_len >= sizeof(struct sockaddr_in6) && 1468 sin6->sin6_scope_id && 1469 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) 1470 fl6->flowi6_oif = sin6->sin6_scope_id; 1471 } else { 1472 if (sk->sk_state != TCP_ESTABLISHED) 1473 return -EDESTADDRREQ; 1474 1475 fl6->fl6_dport = inet->inet_dport; 1476 daddr = &sk->sk_v6_daddr; 1477 fl6->flowlabel = np->flow_label; 1478 connected = true; 1479 } 1480 1481 if (!fl6->flowi6_oif) 1482 fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if); 1483 1484 if (!fl6->flowi6_oif) 1485 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; 1486 1487 fl6->flowi6_uid = sk->sk_uid; 1488 1489 if (msg->msg_controllen) { 1490 opt = &opt_space; 1491 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1492 opt->tot_len = sizeof(*opt); 1493 ipc6.opt = opt; 1494 1495 err = udp_cmsg_send(sk, msg, &ipc6.gso_size); 1496 if (err > 0) 1497 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6, 1498 &ipc6); 1499 if (err < 0) { 1500 fl6_sock_release(flowlabel); 1501 return err; 1502 } 1503 if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { 1504 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); 1505 if (IS_ERR(flowlabel)) 1506 return -EINVAL; 1507 } 1508 if (!(opt->opt_nflen|opt->opt_flen)) 1509 opt = NULL; 1510 connected = false; 1511 } 1512 if (!opt) { 1513 opt = txopt_get(np); 1514 opt_to_free = opt; 1515 } 1516 if (flowlabel) 1517 opt = fl6_merge_options(&opt_space, flowlabel, opt); 1518 opt = ipv6_fixup_options(&opt_space, opt); 1519 ipc6.opt = opt; 1520 1521 fl6->flowi6_proto = sk->sk_protocol; 1522 fl6->flowi6_mark = ipc6.sockc.mark; 1523 fl6->daddr = *daddr; 1524 if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr)) 1525 fl6->saddr = np->saddr; 1526 fl6->fl6_sport = inet->inet_sport; 1527 1528 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) { 1529 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, 1530 (struct sockaddr *)sin6, 1531 &fl6->saddr); 1532 if (err) 1533 goto out_no_dst; 1534 if (sin6) { 1535 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { 1536 /* BPF program rewrote IPv6-only by IPv4-mapped 1537 * IPv6. It's currently unsupported. 1538 */ 1539 err = -ENOTSUPP; 1540 goto out_no_dst; 1541 } 1542 if (sin6->sin6_port == 0) { 1543 /* BPF program set invalid port. Reject it. 
	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);
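/* splice() on a UDP socket signals EOF by flushing whatever has been
 * appended so far; a socket that was explicitly corked with UDP_CORK
 * keeps buffering until userspace uncorks it.
 */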
1645 */ 1646 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 1647 UDP6_INC_STATS(sock_net(sk), 1648 UDP_MIB_SNDBUFERRORS, is_udplite); 1649 } 1650 return err; 1651 1652 do_confirm: 1653 if (msg->msg_flags & MSG_PROBE) 1654 dst_confirm_neigh(dst, &fl6->daddr); 1655 if (!(msg->msg_flags&MSG_PROBE) || len) 1656 goto back_from_confirm; 1657 err = 0; 1658 goto out; 1659 } 1660 EXPORT_SYMBOL(udpv6_sendmsg); 1661 1662 static void udpv6_splice_eof(struct socket *sock) 1663 { 1664 struct sock *sk = sock->sk; 1665 struct udp_sock *up = udp_sk(sk); 1666 1667 if (!up->pending || READ_ONCE(up->corkflag)) 1668 return; 1669 1670 lock_sock(sk); 1671 if (up->pending && !READ_ONCE(up->corkflag)) 1672 udp_v6_push_pending_frames(sk); 1673 release_sock(sk); 1674 } 1675 1676 void udpv6_destroy_sock(struct sock *sk) 1677 { 1678 struct udp_sock *up = udp_sk(sk); 1679 lock_sock(sk); 1680 1681 /* protects from races with udp_abort() */ 1682 sock_set_flag(sk, SOCK_DEAD); 1683 udp_v6_flush_pending_frames(sk); 1684 release_sock(sk); 1685 1686 if (static_branch_unlikely(&udpv6_encap_needed_key)) { 1687 if (up->encap_type) { 1688 void (*encap_destroy)(struct sock *sk); 1689 encap_destroy = READ_ONCE(up->encap_destroy); 1690 if (encap_destroy) 1691 encap_destroy(sk); 1692 } 1693 if (up->encap_enabled) { 1694 static_branch_dec(&udpv6_encap_needed_key); 1695 udp_encap_disable(); 1696 } 1697 } 1698 } 1699 1700 /* 1701 * Socket option code for UDP 1702 */ 1703 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 1704 unsigned int optlen) 1705 { 1706 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET) 1707 return udp_lib_setsockopt(sk, level, optname, 1708 optval, optlen, 1709 udp_v6_push_pending_frames); 1710 return ipv6_setsockopt(sk, level, optname, optval, optlen); 1711 } 1712 1713 int udpv6_getsockopt(struct sock *sk, int level, int optname, 1714 char __user *optval, int __user *optlen) 1715 { 1716 if (level == SOL_UDP || level == SOL_UDPLITE) 1717 return udp_lib_getsockopt(sk, level, optname, optval, optlen); 1718 return ipv6_getsockopt(sk, level, optname, optval, optlen); 1719 } 1720 1721 static const struct inet6_protocol udpv6_protocol = { 1722 .handler = udpv6_rcv, 1723 .err_handler = udpv6_err, 1724 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1725 }; 1726 1727 /* ------------------------------------------------------------------------ */ 1728 #ifdef CONFIG_PROC_FS 1729 int udp6_seq_show(struct seq_file *seq, void *v) 1730 { 1731 if (v == SEQ_START_TOKEN) { 1732 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); 1733 } else { 1734 int bucket = ((struct udp_iter_state *)seq->private)->bucket; 1735 const struct inet_sock *inet = inet_sk((const struct sock *)v); 1736 __u16 srcp = ntohs(inet->inet_sport); 1737 __u16 destp = ntohs(inet->inet_dport); 1738 __ip6_dgram_sock_seq_show(seq, v, srcp, destp, 1739 udp_rqueue_get(v), bucket); 1740 } 1741 return 0; 1742 } 1743 1744 const struct seq_operations udp6_seq_ops = { 1745 .start = udp_seq_start, 1746 .next = udp_seq_next, 1747 .stop = udp_seq_stop, 1748 .show = udp6_seq_show, 1749 }; 1750 EXPORT_SYMBOL(udp6_seq_ops); 1751 1752 static struct udp_seq_afinfo udp6_seq_afinfo = { 1753 .family = AF_INET6, 1754 .udp_table = NULL, 1755 }; 1756 1757 int __net_init udp6_proc_init(struct net *net) 1758 { 1759 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops, 1760 sizeof(struct udp_iter_state), &udp6_seq_afinfo)) 1761 return -ENOMEM; 1762 return 0; 1763 } 1764 1765 void udp6_proc_exit(struct net *net) 1766 
/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}