/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.
 * Copyright (c) 2008 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * Copyright (c) 2014 Kevin Lo
 * All rights reserved.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/rss_config.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/udplite.h>
#include <netinet/in_rss.h>

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

/*
 * UDP and UDP-Lite protocols implementation.
 * Per RFC 768, August, 1980.
 * Per RFC 3828, July, 2004.
 */

/*
 * BSD 4.2 defaulted the udp checksum to be off.  Turning off udp checksums
 * removes the only data integrity mechanism for packets, and malformed
 * packets that would otherwise be discarded due to bad checksums may then
 * cause problems (especially for NFS data blocks).
 */
VNET_DEFINE(int, udp_cksum) = 1;
SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(udp_cksum), 0, "compute udp checksum");

int	udp_log_in_vain = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &udp_log_in_vain, 0, "Log all incoming UDP packets");

VNET_DEFINE(int, udp_blackhole) = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(udp_blackhole), 0,
    "Do not send port unreachables for refused connects");

u_long	udp_sendspace = 9216;		/* really max datagram size */
SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
    &udp_sendspace, 0, "Maximum outgoing UDP datagram size");

u_long	udp_recvspace = 40 * (1024 +
#ifdef INET6
				      sizeof(struct sockaddr_in6)
#else
				      sizeof(struct sockaddr_in)
#endif
				      );		/* 40 1K datagrams */

SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
    &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");

VNET_DEFINE(struct inpcbhead, udb);		/* from udp_var.h */
VNET_DEFINE(struct inpcbinfo, udbinfo);
VNET_DEFINE(struct inpcbhead, ulitecb);
VNET_DEFINE(struct inpcbinfo, ulitecbinfo);
VNET_DEFINE_STATIC(uma_zone_t, udpcb_zone);
#define	V_udpcb_zone		VNET(udpcb_zone)

#ifndef UDBHASHSIZE
#define	UDBHASHSIZE	128
#endif

VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat);		/* from udp_var.h */
VNET_PCPUSTAT_SYSINIT(udpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
    udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(udpstat);
#endif /* VIMAGE */
#ifdef INET
static void	udp_detach(struct socket *so);
static int	udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
		    struct mbuf *, struct thread *);
#endif

static void
udp_zone_change(void *tag)
{

	uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
	uma_zone_set_max(V_udpcb_zone, maxsockets);
}

static int
udp_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp;

	inp = mem;
	INP_LOCK_INIT(inp, "inp", "udpinp");
	return (0);
}

static int
udplite_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp;

	inp = mem;
	INP_LOCK_INIT(inp, "inp", "udpliteinp");
	return (0);
}

void
udp_init(void)
{

	/*
	 * For now default to 2-tuple UDP hashing - until the fragment
	 * reassembly code can also update the flowid.
	 *
	 * Once we can calculate the flowid that way and re-establish
	 * a 4-tuple, flip this to 4-tuple.
	 */
	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
	    "udp_inpcb", udp_inpcb_init, IPI_HASHFIELDS_2TUPLE);
	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_udpcb_zone, maxsockets);
	uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

void
udplite_init(void)
{

	in_pcbinfo_init(&V_ulitecbinfo, "udplite", &V_ulitecb, UDBHASHSIZE,
	    UDBHASHSIZE, "udplite_inpcb", udplite_inpcb_init,
	    IPI_HASHFIELDS_2TUPLE);
}

/*
 * Kernel module interface for updating udpstat.  The argument is an index
 * into udpstat treated as an array of u_long.  While this encodes the
 * general layout of udpstat into the caller, it doesn't encode its location,
 * so that future changes to add, for example, per-CPU stats support won't
 * cause binary compatibility problems for kernel modules.
 */
void
kmod_udpstat_inc(int statnum)
{

	counter_u64_add(VNET(udpstat)[statnum], 1);
}
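
/*
 * Usage sketch (assumption, not part of the original file): a kernel module
 * would normally not compute the index by hand, but go through a wrapper
 * along the lines of KMOD_UDPSTAT_INC() in udp_var.h, e.g.
 *
 *	KMOD_UDPSTAT_INC(udps_noport);
 *
 * which is assumed to expand to
 *
 *	kmod_udpstat_inc(offsetof(struct udpstat, udps_noport) /
 *	    sizeof(uint64_t));
 *
 * so that the module depends only on the field name, not on the layout or
 * location of the statistics structure.
 */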

int
udp_newudpcb(struct inpcb *inp)
{
	struct udpcb *up;

	up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
	if (up == NULL)
		return (ENOBUFS);
	inp->inp_ppcb = up;
	return (0);
}

void
udp_discardcb(struct udpcb *up)
{

	uma_zfree(V_udpcb_zone, up);
}

#ifdef VIMAGE
static void
udp_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_udbinfo);
	uma_zdestroy(V_udpcb_zone);
}
VNET_SYSUNINIT(udp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, udp_destroy, NULL);

static void
udplite_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ulitecbinfo);
}
VNET_SYSUNINIT(udplite, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, udplite_destroy,
    NULL);
#endif

#ifdef INET
/*
 * Subroutine of udp_input(), which appends the provided mbuf chain to the
 * passed pcb/socket.  The caller must provide a sockaddr_in via udp_in that
 * contains the source address.  If the socket ends up being an IPv6 socket,
 * udp_append() will convert to a sockaddr_in6 before passing the address
 * into the socket code.
 *
 * In the normal case udp_append() returns 0, indicating that the caller must
 * unlock the inp.  However, if a tunneling protocol is in place, we increment
 * the inpcb refcount and unlock the inp before calling into the tunneling
 * protocol; on return we reacquire the lock and drop the reference.  If the
 * release reports that the inp is gone, we return that to the caller to tell
 * them *not* to unlock the inp.  In the case of multicast this will cause the
 * distribution to stop (though most tunneling protocols known currently do
 * *not* use multicast).
 */
static int
udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *udp_in)
{
	struct sockaddr *append_sa;
	struct socket *so;
	struct mbuf *tmpopts, *opts = NULL;
#ifdef INET6
	struct sockaddr_in6 udp_in6;
#endif
	struct udpcb *up;

	INP_LOCK_ASSERT(inp);

	/*
	 * Engage the tunneling protocol.
	 */
	up = intoudpcb(inp);
	if (up->u_tun_func != NULL) {
		in_pcbref(inp);
		INP_RUNLOCK(inp);
		(*up->u_tun_func)(n, off, inp, (struct sockaddr *)&udp_in[0],
		    up->u_tun_ctx);
		INP_RLOCK(inp);
		return (in_pcbrele_rlocked(inp));
	}

	off += sizeof(struct udphdr);

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* Check AH/ESP integrity. */
	if (IPSEC_ENABLED(ipv4) &&
	    IPSEC_CHECK_POLICY(ipv4, n, inp) != 0) {
		m_freem(n);
		return (0);
	}
	if (up->u_flags & UF_ESPINUDP) {	/* IPSec UDP encaps. */
		if (IPSEC_ENABLED(ipv4) &&
		    UDPENCAP_INPUT(n, off, AF_INET) != 0)
			return (0);	/* Consumed. */
	}
#endif /* IPSEC */
#ifdef MAC
	if (mac_inpcb_check_deliver(inp, n) != 0) {
		m_freem(n);
		return (0);
	}
#endif /* MAC */
	if (inp->inp_flags & INP_CONTROLOPTS ||
	    inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6)
			(void)ip6_savecontrol_v4(inp, n, &opts, NULL);
		else
#endif /* INET6 */
			ip_savecontrol(inp, &opts, ip, n);
	}
	if ((inp->inp_vflag & INP_IPV4) && (inp->inp_flags2 & INP_ORIGDSTADDR)) {
		tmpopts = sbcreatecontrol((caddr_t)&udp_in[1],
		    sizeof(struct sockaddr_in), IP_ORIGDSTADDR, IPPROTO_IP);
		if (tmpopts) {
			if (opts) {
				tmpopts->m_next = opts;
				opts = tmpopts;
			} else
				opts = tmpopts;
		}
	}
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		bzero(&udp_in6, sizeof(udp_in6));
		udp_in6.sin6_len = sizeof(udp_in6);
		udp_in6.sin6_family = AF_INET6;
		in6_sin_2_v4mapsin6(&udp_in[0], &udp_in6);
		append_sa = (struct sockaddr *)&udp_in6;
	} else
#endif /* INET6 */
		append_sa = (struct sockaddr *)&udp_in[0];
	m_adj(n, off);

	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(n);
		if (opts)
			m_freem(opts);
		UDPSTAT_INC(udps_fullsock);
	} else
		sorwakeup_locked(so);
	return (0);
}

int
udp_input(struct mbuf **mp, int *offp, int proto)
{
	struct ip *ip;
	struct udphdr *uh;
	struct ifnet *ifp;
	struct inpcb *inp;
	uint16_t len, ip_len;
	struct inpcbinfo *pcbinfo;
	struct ip save_ip;
	struct sockaddr_in udp_in[2];
	struct mbuf *m;
	struct m_tag *fwd_tag;
	struct epoch_tracker et;
	int cscov_partial, iphlen;

	m = *mp;
	iphlen = *offp;
	ifp = m->m_pkthdr.rcvif;
	*mp = NULL;
	UDPSTAT_INC(udps_ipackets);

	/*
	 * Strip IP options, if any; should skip this, make available to
	 * user, and use on returned packets, but we don't yet have a way to
	 * check the checksum with options still present.
	 */
	if (iphlen > sizeof (struct ip)) {
		ip_stripoptions(m);
		iphlen = sizeof(struct ip);
	}

	/*
	 * Get IP and UDP header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	if (m->m_len < iphlen + sizeof(struct udphdr)) {
		if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == NULL) {
			UDPSTAT_INC(udps_hdrops);
			return (IPPROTO_DONE);
		}
		ip = mtod(m, struct ip *);
	}
	uh = (struct udphdr *)((caddr_t)ip + iphlen);
	cscov_partial = (proto == IPPROTO_UDPLITE) ? 1 : 0;

	/*
	 * Destination port of 0 is illegal, based on RFC768.
	 */
	if (uh->uh_dport == 0)
		goto badunlocked;

	/*
	 * Construct sockaddr format source address.  Stuff source address
	 * and datagram in user buffer.
	 */
	bzero(&udp_in[0], sizeof(struct sockaddr_in) * 2);
	udp_in[0].sin_len = sizeof(struct sockaddr_in);
	udp_in[0].sin_family = AF_INET;
	udp_in[0].sin_port = uh->uh_sport;
	udp_in[0].sin_addr = ip->ip_src;
	udp_in[1].sin_len = sizeof(struct sockaddr_in);
	udp_in[1].sin_family = AF_INET;
	udp_in[1].sin_port = uh->uh_dport;
	udp_in[1].sin_addr = ip->ip_dst;

	/*
	 * Make mbuf data length reflect UDP length.  If not enough data to
	 * reflect UDP length, drop.
	 */
	len = ntohs((u_short)uh->uh_ulen);
	ip_len = ntohs(ip->ip_len) - iphlen;
	if (proto == IPPROTO_UDPLITE && (len == 0 || len == ip_len)) {
		/* Zero means checksum over the complete packet. */
		if (len == 0)
			len = ip_len;
		cscov_partial = 0;
	}
	if (ip_len != len) {
		if (len > ip_len || len < sizeof(struct udphdr)) {
			UDPSTAT_INC(udps_badlen);
			goto badunlocked;
		}
		if (proto == IPPROTO_UDP)
			m_adj(m, len - ip_len);
	}

	/*
	 * Save a copy of the IP header in case we want to restore it for
	 * sending an ICMP error message in response.
	 */
	if (!V_udp_blackhole)
		save_ip = *ip;
	else
		memset(&save_ip, 0, sizeof(save_ip));

	/*
	 * Checksum extended UDP header and data.
	 */
	if (uh->uh_sum) {
		u_short uh_sum;

		if ((m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
		    !cscov_partial) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				uh_sum = m->m_pkthdr.csum_data;
			else
				uh_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + proto));
			uh_sum ^= 0xffff;
		} else {
			char b[9];

			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
			bzero(((struct ipovly *)ip)->ih_x1, 9);
			((struct ipovly *)ip)->ih_len = (proto == IPPROTO_UDP) ?
			    uh->uh_ulen : htons(ip_len);
			uh_sum = in_cksum(m, len + sizeof (struct ip));
			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
		}
		if (uh_sum) {
			UDPSTAT_INC(udps_badsum);
			m_freem(m);
			return (IPPROTO_DONE);
		}
	} else {
		if (proto == IPPROTO_UDP) {
			UDPSTAT_INC(udps_nosum);
		} else {
			/* UDPLite requires a checksum */
			/* XXX: What is the right UDPLite MIB counter here? */
			m_freem(m);
			return (IPPROTO_DONE);
		}
	}
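
	/*
	 * Illustration (assumption, not from the original file): the branch
	 * above that trusts m_pkthdr.csum_data relies on the receiving driver
	 * having verified the checksum in hardware, typically by marking the
	 * mbuf along the lines of
	 *
	 *	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	 *	m->m_pkthdr.csum_data = 0xffff;
	 *
	 * in which case "uh_sum ^= 0xffff" above yields 0 and the software
	 * in_cksum() pass over the pseudo-header and payload is skipped.
	 */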

	pcbinfo = udp_get_inpcbinfo(proto);
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
	    in_broadcast(ip->ip_dst, ifp)) {
		struct inpcb *last;
		struct inpcbhead *pcblist;

		INP_INFO_RLOCK_ET(pcbinfo, et);
		pcblist = udp_get_pcblist(proto);
		last = NULL;
		CK_LIST_FOREACH(inp, pcblist, inp_list) {
			if (inp->inp_lport != uh->uh_dport)
				continue;
#ifdef INET6
			if ((inp->inp_vflag & INP_IPV4) == 0)
				continue;
#endif
			if (inp->inp_laddr.s_addr != INADDR_ANY &&
			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
				continue;
			if (inp->inp_faddr.s_addr != INADDR_ANY &&
			    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
				continue;
			if (inp->inp_fport != 0 &&
			    inp->inp_fport != uh->uh_sport)
				continue;

			INP_RLOCK(inp);

			if (__predict_false(inp->inp_flags2 & INP_FREED)) {
				INP_RUNLOCK(inp);
				continue;
			}

			/*
			 * XXXRW: Because we weren't holding either the inpcb
			 * or the hash lock when we checked for a match
			 * before, we should probably recheck now that the
			 * inpcb lock is held.
			 */

			/*
			 * Handle socket delivery policy for any-source
			 * and source-specific multicast. [RFC3678]
			 */
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
				struct ip_moptions *imo;
				struct sockaddr_in group;
				int blocked;

				imo = inp->inp_moptions;
				if (imo == NULL) {
					INP_RUNLOCK(inp);
					continue;
				}
				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(imo, ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&udp_in[0]);
				if (blocked != MCAST_PASS) {
					if (blocked == MCAST_NOTGMEMBER)
						IPSTAT_INC(ips_notmember);
					if (blocked == MCAST_NOTSMEMBER ||
					    blocked == MCAST_MUTED)
						UDPSTAT_INC(udps_filtermcast);
					INP_RUNLOCK(inp);
					continue;
				}
			}
			if (last != NULL) {
				struct mbuf *n;

				if ((n = m_copym(m, 0, M_COPYALL, M_NOWAIT)) !=
				    NULL) {
					if (proto == IPPROTO_UDPLITE)
						UDPLITE_PROBE(receive, NULL,
						    last, ip, last, uh);
					else
						UDP_PROBE(receive, NULL, last,
						    ip, last, uh);
					if (udp_append(last, ip, n, iphlen,
					    udp_in)) {
						goto inp_lost;
					}
				}
				INP_RUNLOCK(last);
			}
			last = inp;
			/*
			 * Don't look for additional matches if this one does
			 * not have either the SO_REUSEPORT or SO_REUSEADDR
			 * socket options set.  This heuristic avoids
			 * searching through all pcbs in the common case of a
			 * non-shared port.  It assumes that an application
			 * will never clear these options after setting them.
			 */
			if ((last->inp_socket->so_options &
			    (SO_REUSEPORT|SO_REUSEPORT_LB|SO_REUSEADDR)) == 0)
				break;
		}

		if (last == NULL) {
			/*
			 * No matching pcb found; discard datagram.  (No need
			 * to send an ICMP Port Unreachable for a broadcast
			 * or multicast datagram.)
			 */
			UDPSTAT_INC(udps_noportbcast);
			if (inp)
				INP_RUNLOCK(inp);
			INP_INFO_RUNLOCK_ET(pcbinfo, et);
			goto badunlocked;
		}
		if (proto == IPPROTO_UDPLITE)
			UDPLITE_PROBE(receive, NULL, last, ip, last, uh);
		else
			UDP_PROBE(receive, NULL, last, ip, last, uh);
		if (udp_append(last, ip, m, iphlen, udp_in) == 0)
			INP_RUNLOCK(last);
inp_lost:
		INP_INFO_RUNLOCK_ET(pcbinfo, et);
		return (IPPROTO_DONE);
	}

	/*
	 * Locate pcb for datagram.
	 */

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if ((m->m_flags & M_IP_NEXTHOP) &&
	    (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag + 1);

		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
		    ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(pcbinfo, ip->ip_src,
			    uh->uh_sport, next_hop->sin_addr,
			    next_hop->sin_port ? htons(next_hop->sin_port) :
			    uh->uh_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_RLOCKPCB, ifp);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
		m->m_flags &= ~M_IP_NEXTHOP;
	} else
		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
		    ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
		    INPLOOKUP_RLOCKPCB, ifp, m);
	if (inp == NULL) {
		if (udp_log_in_vain) {
			char src[INET_ADDRSTRLEN];
			char dst[INET_ADDRSTRLEN];

			log(LOG_INFO,
			    "Connection attempt to UDP %s:%d from %s:%d\n",
			    inet_ntoa_r(ip->ip_dst, dst), ntohs(uh->uh_dport),
			    inet_ntoa_r(ip->ip_src, src), ntohs(uh->uh_sport));
		}
		if (proto == IPPROTO_UDPLITE)
			UDPLITE_PROBE(receive, NULL, NULL, ip, NULL, uh);
		else
			UDP_PROBE(receive, NULL, NULL, ip, NULL, uh);
		UDPSTAT_INC(udps_noport);
		if (m->m_flags & (M_BCAST | M_MCAST)) {
			UDPSTAT_INC(udps_noportbcast);
			goto badunlocked;
		}
		if (V_udp_blackhole)
			goto badunlocked;
		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
			goto badunlocked;
		*ip = save_ip;
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
		return (IPPROTO_DONE);
	}

	/*
	 * Check the minimum TTL for socket.
	 */
	INP_RLOCK_ASSERT(inp);
	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
		if (proto == IPPROTO_UDPLITE)
			UDPLITE_PROBE(receive, NULL, inp, ip, inp, uh);
		else
			UDP_PROBE(receive, NULL, inp, ip, inp, uh);
		INP_RUNLOCK(inp);
		m_freem(m);
		return (IPPROTO_DONE);
	}
	if (cscov_partial) {
		struct udpcb *up;

		up = intoudpcb(inp);
		if (up->u_rxcslen == 0 || up->u_rxcslen > len) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (IPPROTO_DONE);
		}
	}

	if (proto == IPPROTO_UDPLITE)
		UDPLITE_PROBE(receive, NULL, inp, ip, inp, uh);
	else
		UDP_PROBE(receive, NULL, inp, ip, inp, uh);
	if (udp_append(inp, ip, m, iphlen, udp_in) == 0)
		INP_RUNLOCK(inp);
	return (IPPROTO_DONE);

badunlocked:
	m_freem(m);
	return (IPPROTO_DONE);
}
#endif /* INET */

/*
 * Notify a udp user of an asynchronous error; just wake up so that they can
 * collect error status.
 */
struct inpcb *
udp_notify(struct inpcb *inp, int errno)
{

	INP_WLOCK_ASSERT(inp);
	if ((errno == EHOSTUNREACH || errno == ENETUNREACH ||
	    errno == EHOSTDOWN) && inp->inp_route.ro_rt) {
		RTFREE(inp->inp_route.ro_rt);
		inp->inp_route.ro_rt = (struct rtentry *)NULL;
	}

	inp->inp_socket->so_error = errno;
	sorwakeup(inp->inp_socket);
	sowwakeup(inp->inp_socket);
	return (inp);
}

#ifdef INET
static void
udp_common_ctlinput(int cmd, struct sockaddr *sa, void *vip,
    struct inpcbinfo *pcbinfo)
{
	struct ip *ip = vip;
	struct udphdr *uh;
	struct in_addr faddr;
	struct inpcb *inp;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (PRC_IS_REDIRECT(cmd)) {
		/* signal EHOSTDOWN, as it flushes the cached route */
		in_pcbnotifyall(&V_udbinfo, faddr, EHOSTDOWN, udp_notify);
		return;
	}

	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 *
	 * XXX: We never get this from ICMP, otherwise it makes an excellent
	 * DoS attack on machines with many connections.
	 */
	if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		inp = in_pcblookup(pcbinfo, faddr, uh->uh_dport,
		    ip->ip_src, uh->uh_sport, INPLOOKUP_WLOCKPCB, NULL);
		if (inp != NULL) {
			INP_WLOCK_ASSERT(inp);
			if (inp->inp_socket != NULL) {
				udp_notify(inp, inetctlerrmap[cmd]);
			}
			INP_WUNLOCK(inp);
		} else {
			inp = in_pcblookup(pcbinfo, faddr, uh->uh_dport,
			    ip->ip_src, uh->uh_sport,
			    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
			if (inp != NULL) {
				struct udpcb *up;
				void *ctx;
				udp_tun_icmp_t func;

				up = intoudpcb(inp);
				ctx = up->u_tun_ctx;
				func = up->u_icmp_func;
				INP_RUNLOCK(inp);
				if (func != NULL)
					(*func)(cmd, sa, vip, ctx);
			}
		}
	} else
		in_pcbnotifyall(pcbinfo, faddr, inetctlerrmap[cmd],
		    udp_notify);
}

void
udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{

	return (udp_common_ctlinput(cmd, sa, vip, &V_udbinfo));
}

void
udplite_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{

	return (udp_common_ctlinput(cmd, sa, vip, &V_ulitecbinfo));
}
#endif /* INET */

static int
udp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	struct epoch_tracker et;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_udbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK_ET(&V_udbinfo, et);
	gencnt = V_udbinfo.ipi_gencnt;
	n = V_udbinfo.ipi_count;
	INP_INFO_RUNLOCK_ET(&V_udbinfo, et);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
	    + n * sizeof(struct xinpcb));
	if (error != 0)
		return (error);

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == NULL)
		return (ENOMEM);

	INP_INFO_RLOCK_ET(&V_udbinfo, et);
	for (inp = CK_LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = CK_LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK_ET(&V_udbinfo, et);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			in_pcbtoxinpcb(inp, &xi);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_udbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_udbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK_ET(&V_udbinfo, et);
		xig.xig_gen = V_udbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_udbinfo.ipi_count;
		INP_INFO_RUNLOCK_ET(&V_udbinfo, et);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    udp_pcblist, "S,xinpcb", "List of active UDP sockets");

#ifdef INET
static int
udp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port,
	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
	if (inp != NULL) {
		INP_RLOCK_ASSERT(inp);
		if (inp->inp_socket == NULL)
			error = ENOENT;
		if (error == 0)
			error = cr_canseeinpcb(req->td->td_ucred, inp);
		if (error == 0)
			cru2x(inp->inp_cred, &xuc);
		INP_RUNLOCK(inp);
	} else
		error = ENOENT;
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
#endif /* INET */

int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp;
	struct udpcb *up;
	int isudplite, error, optval;

	error = 0;
	isudplite = (so->so_proto->pr_protocol == IPPROTO_UDPLITE) ? 1 : 0;
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
	INP_WLOCK(inp);
	if (sopt->sopt_level != so->so_proto->pr_protocol) {
#ifdef INET6
		if (INP_CHECK_SOCKAF(so, AF_INET6)) {
			INP_WUNLOCK(inp);
			error = ip6_ctloutput(so, sopt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			INP_WUNLOCK(inp);
			error = ip_ctloutput(so, sopt);
		}
#endif
		return (error);
	}

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET
		case UDP_ENCAP:
			if (!IPSEC_ENABLED(ipv4)) {
				INP_WUNLOCK(inp);
				return (ENOPROTOOPT);
			}
			error = UDPENCAP_PCBCTL(inp, sopt);
			break;
#endif /* INET */
#endif /* IPSEC */
		case UDPLITE_SEND_CSCOV:
		case UDPLITE_RECV_CSCOV:
			if (!isudplite) {
				INP_WUNLOCK(inp);
				error = ENOPROTOOPT;
				break;
			}
			INP_WUNLOCK(inp);
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error != 0)
				break;
			inp = sotoinpcb(so);
			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
			INP_WLOCK(inp);
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
			if ((optval != 0 && optval < 8) || (optval > 65535)) {
				INP_WUNLOCK(inp);
				error = EINVAL;
				break;
			}
			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
				up->u_txcslen = optval;
			else
				up->u_rxcslen = optval;
			INP_WUNLOCK(inp);
			break;
		default:
			INP_WUNLOCK(inp);
			error = ENOPROTOOPT;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET
		case UDP_ENCAP:
			if (!IPSEC_ENABLED(ipv4)) {
				INP_WUNLOCK(inp);
				return (ENOPROTOOPT);
			}
			error = UDPENCAP_PCBCTL(inp, sopt);
			break;
#endif /* INET */
#endif /* IPSEC */
		case UDPLITE_SEND_CSCOV:
		case UDPLITE_RECV_CSCOV:
			if (!isudplite) {
				INP_WUNLOCK(inp);
				error = ENOPROTOOPT;
				break;
			}
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
				optval = up->u_txcslen;
			else
				optval = up->u_rxcslen;
			INP_WUNLOCK(inp);
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;
		default:
			INP_WUNLOCK(inp);
			error = ENOPROTOOPT;
			break;
		}
		break;
	}
	return (error);
}
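
/*
 * Usage sketch (assumption, not part of this file): an application selects
 * partial checksum coverage on a UDP-Lite socket through the options handled
 * above, roughly:
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cscov = 20;	(bytes covered, counted from the UDP-Lite header)
 *	setsockopt(s, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cscov,
 *	    sizeof(cscov));
 *	setsockopt(s, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cscov,
 *	    sizeof(cscov));
 *
 * Values 1 through 7 are rejected above with EINVAL because RFC 3828
 * requires the coverage, when non-zero, to include the 8-byte header;
 * zero means the checksum covers the whole packet.
 */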

#ifdef INET
#define	UH_WLOCKED	2
#define	UH_RLOCKED	1
#define	UH_UNLOCKED	0
static int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *td)
{
	struct udpiphdr *ui;
	int len = m->m_pkthdr.len;
	struct in_addr faddr, laddr;
	struct cmsghdr *cm;
	struct inpcbinfo *pcbinfo;
	struct sockaddr_in *sin, src;
	struct epoch_tracker et;
	int cscov_partial = 0;
	int error = 0;
	int ipflags;
	u_short fport, lport;
	int unlock_udbinfo, unlock_inp;
	u_char tos;
	uint8_t pr;
	uint16_t cscov = 0;
	uint32_t flowid = 0;
	uint8_t flowtype = M_HASHTYPE_NONE;

	/*
	 * udp_output() may need to temporarily bind or connect the current
	 * inpcb.  As such, we don't know up front whether we will need the
	 * pcbinfo lock or not.  Do any work to decide what is needed up
	 * front before acquiring any locks.
	 */
	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
		if (control)
			m_freem(control);
		m_freem(m);
		return (EMSGSIZE);
	}

	src.sin_family = 0;
	sin = (struct sockaddr_in *)addr;
	if (sin == NULL ||
	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
		INP_WLOCK(inp);
		unlock_inp = UH_WLOCKED;
	} else {
		INP_RLOCK(inp);
		unlock_inp = UH_RLOCKED;
	}
	tos = inp->inp_ip_tos;
	if (control != NULL) {
		/*
		 * XXX: Currently, we assume all the optional information is
		 * stored in a single mbuf.
		 */
		if (control->m_next) {
			if (unlock_inp == UH_WLOCKED)
				INP_WUNLOCK(inp);
			else
				INP_RUNLOCK(inp);
			m_freem(control);
			m_freem(m);
			return (EINVAL);
		}
		for (; control->m_len > 0;
		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
			cm = mtod(control, struct cmsghdr *);
			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
			    || cm->cmsg_len > control->m_len) {
				error = EINVAL;
				break;
			}
			if (cm->cmsg_level != IPPROTO_IP)
				continue;

			switch (cm->cmsg_type) {
			case IP_SENDSRCADDR:
				if (cm->cmsg_len !=
				    CMSG_LEN(sizeof(struct in_addr))) {
					error = EINVAL;
					break;
				}
				bzero(&src, sizeof(src));
				src.sin_family = AF_INET;
				src.sin_len = sizeof(src);
				src.sin_port = inp->inp_lport;
				src.sin_addr =
				    *(struct in_addr *)CMSG_DATA(cm);
				break;

			case IP_TOS:
				if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
					error = EINVAL;
					break;
				}
				tos = *(u_char *)CMSG_DATA(cm);
				break;

			case IP_FLOWID:
				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
					error = EINVAL;
					break;
				}
				flowid = *(uint32_t *) CMSG_DATA(cm);
				break;

			case IP_FLOWTYPE:
				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
					error = EINVAL;
					break;
				}
				flowtype = *(uint32_t *) CMSG_DATA(cm);
				break;

#ifdef RSS
			case IP_RSSBUCKETID:
				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
					error = EINVAL;
					break;
				}
				/* This is just a placeholder for now */
				break;
#endif	/* RSS */
			default:
				error = ENOPROTOOPT;
				break;
			}
			if (error)
				break;
		}
		m_freem(control);
	}
	if (error) {
		if (unlock_inp == UH_WLOCKED)
			INP_WUNLOCK(inp);
		else
			INP_RUNLOCK(inp);
		m_freem(m);
		return (error);
	}
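
	/*
	 * Userland sketch (assumption, not part of this file): the control
	 * messages parsed above are supplied via sendmsg(2), e.g. to pick the
	 * source address of a single datagram on a socket bound to
	 * INADDR_ANY:
	 *
	 *	char cbuf[CMSG_SPACE(sizeof(struct in_addr))];
	 *	struct msghdr msg;	(msg_name/msg_iov set up as usual)
	 *	struct cmsghdr *c;
	 *
	 *	msg.msg_control = cbuf;
	 *	msg.msg_controllen = sizeof(cbuf);
	 *	c = CMSG_FIRSTHDR(&msg);
	 *	c->cmsg_level = IPPROTO_IP;
	 *	c->cmsg_type = IP_SENDSRCADDR;
	 *	c->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
	 *	memcpy(CMSG_DATA(c), &srcaddr, sizeof(struct in_addr));
	 *	sendmsg(s, &msg, 0);
	 *
	 * The cmsg_level/cmsg_type/length values mirror the checks performed
	 * in the loop above; srcaddr is a hypothetical struct in_addr.
	 */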

	/*
	 * In the old days, depending on whether or not the application had
	 * bound or connected the socket, we had to do varying levels of work.
	 * The optimal case was for a connected UDP socket, as a global lock
	 * wasn't required at all.
	 * In order to decide which we need, we required stability of the
	 * inpcb binding, which we ensured by acquiring a read lock on the
	 * inpcb.  This didn't strictly follow the lock order, so we played
	 * the trylock and retry game.
	 * With the re-introduction of the route-cache in some cases, we started
	 * to acquire an early inp wlock and a possible race during re-lock
	 * went away.  With the introduction of epoch(9) some read locking
	 * became epoch(9) and the lock-order issues also went away.
	 * Due to the route-cache we may now hold more conservative locks than
	 * otherwise required, and have split the 2nd case into cases 2 and 3
	 * in order to keep the udbinfo lock level in sync with the inp one
	 * for the IP_SENDSRCADDR case below.
	 */
	pr = inp->inp_socket->so_proto->pr_protocol;
	pcbinfo = udp_get_inpcbinfo(pr);
	if (sin != NULL &&
	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
		INP_HASH_WLOCK(pcbinfo);
		unlock_udbinfo = UH_WLOCKED;
	} else if (sin != NULL &&
	    (sin->sin_addr.s_addr == INADDR_ANY ||
	    sin->sin_addr.s_addr == INADDR_BROADCAST ||
	    inp->inp_laddr.s_addr == INADDR_ANY ||
	    inp->inp_lport == 0)) {
		INP_HASH_RLOCK_ET(pcbinfo, et);
		unlock_udbinfo = UH_RLOCKED;
	} else if (src.sin_family == AF_INET) {
		if (unlock_inp == UH_WLOCKED) {
			INP_HASH_WLOCK(pcbinfo);
			unlock_udbinfo = UH_WLOCKED;
		} else {
			INP_HASH_RLOCK_ET(pcbinfo, et);
			unlock_udbinfo = UH_RLOCKED;
		}
	} else
		unlock_udbinfo = UH_UNLOCKED;

	/*
	 * If the IP_SENDSRCADDR control message was specified, override the
	 * source address for this datagram.  Its use is invalidated if the
	 * address thus specified is incomplete or clobbers other inpcbs.
	 */
	laddr = inp->inp_laddr;
	lport = inp->inp_lport;
	if (src.sin_family == AF_INET) {
		INP_HASH_LOCK_ASSERT(pcbinfo);
		if ((lport == 0) ||
		    (laddr.s_addr == INADDR_ANY &&
		     src.sin_addr.s_addr == INADDR_ANY)) {
			error = EINVAL;
			goto release;
		}
		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
		    &laddr.s_addr, &lport, td->td_ucred);
		if (error)
			goto release;
	}

	/*
	 * If a UDP socket has been connected, then a local address/port will
	 * have been selected and bound.
	 *
	 * If a UDP socket has not been connected to, then an explicit
	 * destination address must be used, in which case a local
	 * address/port may not have been selected and bound.
	 */
	if (sin != NULL) {
		INP_LOCK_ASSERT(inp);
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			error = EISCONN;
			goto release;
		}

		/*
		 * Jail may rewrite the destination address, so let it do
		 * that before we use it.
		 */
		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
		if (error)
			goto release;

		/*
		 * If a local address or port hasn't yet been selected, or if
		 * the destination address needs to be rewritten due to using
		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
		 * to do the heavy lifting.  Once a port is selected, we
		 * commit the binding back to the socket; we also commit the
		 * binding of the address if in jail.
		 *
		 * If we already have a valid binding and we're not
		 * requesting a destination address rewrite, use a fast path.
		 */
		if (inp->inp_laddr.s_addr == INADDR_ANY ||
		    inp->inp_lport == 0 ||
		    sin->sin_addr.s_addr == INADDR_ANY ||
		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
			INP_HASH_LOCK_ASSERT(pcbinfo);
			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
			    &lport, &faddr.s_addr, &fport, NULL,
			    td->td_ucred);
			if (error)
				goto release;

			/*
			 * XXXRW: Why not commit the port if the address is
			 * !INADDR_ANY?
			 */
			/* Commit the local port if newly assigned. */
			if (inp->inp_laddr.s_addr == INADDR_ANY &&
			    inp->inp_lport == 0) {
				INP_WLOCK_ASSERT(inp);
				INP_HASH_WLOCK_ASSERT(pcbinfo);
				/*
				 * Remember addr if jailed, to prevent
				 * rebinding.
				 */
				if (prison_flag(td->td_ucred, PR_IP4))
					inp->inp_laddr = laddr;
				inp->inp_lport = lport;
				if (in_pcbinshash(inp) != 0) {
					inp->inp_lport = 0;
					error = EAGAIN;
					goto release;
				}
				inp->inp_flags |= INP_ANONPORT;
			}
		} else {
			faddr = sin->sin_addr;
			fport = sin->sin_port;
		}
	} else {
		INP_LOCK_ASSERT(inp);
		faddr = inp->inp_faddr;
		fport = inp->inp_fport;
		if (faddr.s_addr == INADDR_ANY) {
			error = ENOTCONN;
			goto release;
		}
	}

	/*
	 * Calculate data length and get a mbuf for UDP, IP, and possible
	 * link-layer headers.  Immediately slide the data pointer forward
	 * again since we won't use that space at this layer.
	 */
	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto release;
	}
	m->m_data += max_linkhdr;
	m->m_len -= max_linkhdr;
	m->m_pkthdr.len -= max_linkhdr;

	/*
	 * Fill in mbuf with extended UDP header and addresses and length put
	 * into network format.
	 */
	ui = mtod(m, struct udpiphdr *);
	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
	ui->ui_v = IPVERSION << 4;
	ui->ui_pr = pr;
	ui->ui_src = laddr;
	ui->ui_dst = faddr;
	ui->ui_sport = lport;
	ui->ui_dport = fport;
	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
	if (pr == IPPROTO_UDPLITE) {
		struct udpcb *up;
		uint16_t plen;

		up = intoudpcb(inp);
		cscov = up->u_txcslen;
		plen = (u_short)len + sizeof(struct udphdr);
		if (cscov >= plen)
			cscov = 0;
		ui->ui_len = htons(plen);
		ui->ui_ulen = htons(cscov);
		/*
		 * For UDP-Lite, checksum coverage length of zero means
		 * the entire UDPLite packet is covered by the checksum.
		 */
		cscov_partial = (cscov == 0) ? 0 : 1;
	}

	/*
	 * Set the Don't Fragment bit in the IP header.
	 */
	if (inp->inp_flags & INP_DONTFRAG) {
		struct ip *ip;

		ip = (struct ip *)&ui->ui_i;
		ip->ip_off |= htons(IP_DF);
	}

	ipflags = 0;
	if (inp->inp_socket->so_options & SO_DONTROUTE)
		ipflags |= IP_ROUTETOIF;
	if (inp->inp_socket->so_options & SO_BROADCAST)
		ipflags |= IP_ALLOWBROADCAST;
	if (inp->inp_flags & INP_ONESBCAST)
		ipflags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	/*
	 * Set up checksum and output datagram.
	 */
	ui->ui_sum = 0;
	if (pr == IPPROTO_UDPLITE) {
		if (inp->inp_flags & INP_ONESBCAST)
			faddr.s_addr = INADDR_BROADCAST;
		if (cscov_partial) {
			if ((ui->ui_sum = in_cksum(m,
			    sizeof(struct ip) + cscov)) == 0)
				ui->ui_sum = 0xffff;
		} else {
			if ((ui->ui_sum = in_cksum(m,
			    sizeof(struct udpiphdr) + len)) == 0)
				ui->ui_sum = 0xffff;
		}
	} else if (V_udp_cksum) {
		if (inp->inp_flags & INP_ONESBCAST)
			faddr.s_addr = INADDR_BROADCAST;
		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
		    htons((u_short)len + sizeof(struct udphdr) + pr));
		m->m_pkthdr.csum_flags = CSUM_UDP;
		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
	}
	((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
	((struct ip *)ui)->ip_tos = tos;		/* XXX */
	UDPSTAT_INC(udps_opackets);

	/*
	 * Setup flowid / RSS information for outbound socket.
	 *
	 * Once the UDP code decides to set a flowid some other way,
	 * this allows the flowid to be overridden by userland.
	 */
	if (flowtype != M_HASHTYPE_NONE) {
		m->m_pkthdr.flowid = flowid;
		M_HASHTYPE_SET(m, flowtype);
#ifdef RSS
	} else {
		uint32_t hash_val, hash_type;
		/*
		 * Calculate an appropriate RSS hash for UDP and
		 * UDP Lite.
		 *
		 * The called function will take care of figuring out
		 * whether a 2-tuple or 4-tuple hash is required based
		 * on the currently configured scheme.
		 *
		 * Later on, connected socket values should be cached
		 * in the inpcb and reused, rather than constantly
		 * re-calculated.
		 *
		 * UDP Lite is a different protocol number and will
		 * likely end up being hashed as a 2-tuple until
		 * RSS / NICs grow UDP Lite protocol awareness.
		 */
		if (rss_proto_software_hash_v4(faddr, laddr, fport, lport,
		    pr, &hash_val, &hash_type) == 0) {
			m->m_pkthdr.flowid = hash_val;
			M_HASHTYPE_SET(m, hash_type);
		}
#endif
	}

#ifdef RSS
	/*
	 * Don't override with the inp cached flowid value.
	 *
	 * Depending upon the kind of send being done, the inp
	 * flowid/flowtype values may actually not be appropriate
	 * for this particular socket send.
	 *
	 * We should either leave the flowid at zero (which is what is
	 * currently done) or set it to some software generated
	 * hash value based on the packet contents.
	 */
	ipflags |= IP_NODEFAULTFLOWID;
#endif	/* RSS */

	if (unlock_udbinfo == UH_WLOCKED)
		INP_HASH_WUNLOCK(pcbinfo);
	else if (unlock_udbinfo == UH_RLOCKED)
		INP_HASH_RUNLOCK_ET(pcbinfo, et);
	if (pr == IPPROTO_UDPLITE)
		UDPLITE_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u);
	else
		UDP_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u);
	error = ip_output(m, inp->inp_options,
	    (unlock_inp == UH_WLOCKED ? &inp->inp_route : NULL), ipflags,
	    inp->inp_moptions, inp);
	if (unlock_inp == UH_WLOCKED)
		INP_WUNLOCK(inp);
	else
		INP_RUNLOCK(inp);
	return (error);

release:
	if (unlock_udbinfo == UH_WLOCKED) {
		KASSERT(unlock_inp == UH_WLOCKED,
		    ("%s: excl udbinfo lock, shared inp lock", __func__));
		INP_HASH_WUNLOCK(pcbinfo);
		INP_WUNLOCK(inp);
	} else if (unlock_udbinfo == UH_RLOCKED) {
		KASSERT(unlock_inp == UH_RLOCKED,
		    ("%s: shared udbinfo lock, excl inp lock", __func__));
		INP_HASH_RUNLOCK_ET(pcbinfo, et);
		INP_RUNLOCK(inp);
	} else if (unlock_inp == UH_WLOCKED)
		INP_WUNLOCK(inp);
	else
		INP_RUNLOCK(inp);
	m_freem(m);
	return (error);
}

static void
udp_abort(struct socket *so)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_HASH_WLOCK(pcbinfo);
		in_pcbdisconnect(inp);
		inp->inp_laddr.s_addr = INADDR_ANY;
		INP_HASH_WUNLOCK(pcbinfo);
		soisdisconnected(so);
	}
	INP_WUNLOCK(inp);
}

static int
udp_attach(struct socket *so, int proto, struct thread *td)
{
	static uint32_t udp_flowid;
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;
	int error;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
	error = soreserve(so, udp_sendspace, udp_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(pcbinfo);
	error = in_pcballoc(so, pcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(pcbinfo);
		return (error);
	}

	inp = sotoinpcb(so);
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_ttl = V_ip_defttl;
	inp->inp_flowid = atomic_fetchadd_int(&udp_flowid, 1);
	inp->inp_flowtype = M_HASHTYPE_OPAQUE;

	error = udp_newudpcb(inp);
	if (error) {
		in_pcbdetach(inp);
		in_pcbfree(inp);
		INP_INFO_WUNLOCK(pcbinfo);
		return (error);
	}

	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
	return (0);
}
#endif /* INET */

int
udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f,
    udp_tun_icmp_t i, void *ctx)
{
	struct inpcb *inp;
	struct udpcb *up;

	KASSERT(so->so_type == SOCK_DGRAM,
	    ("udp_set_kernel_tunneling: !dgram"));
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
	INP_WLOCK(inp);
	up = intoudpcb(inp);
	if ((up->u_tun_func != NULL) ||
	    (up->u_icmp_func != NULL)) {
		INP_WUNLOCK(inp);
		return (EBUSY);
	}
	up->u_tun_func = f;
	up->u_icmp_func = i;
	up->u_tun_ctx = ctx;
	INP_WUNLOCK(inp);
	return (0);
}
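
/*
 * Usage sketch (assumption, not from this file): an in-kernel encapsulation
 * protocol typically opens its own UDP socket and hooks the input path via
 * this interface, roughly:
 *
 *	static void
 *	foo_tun_input(struct mbuf *m, int off, struct inpcb *inp,
 *	    const struct sockaddr *sa, void *ctx)
 *	{
 *		(decapsulate m; off is the offset of the UDP header)
 *	}
 *
 *	error = udp_set_kernel_tunneling(so, foo_tun_input, NULL, sc);
 *
 * foo_tun_input and sc are hypothetical names; the exact callback prototype
 * is given by udp_tun_func_t in udp_var.h.  Only one tunnel function may be
 * registered per socket (EBUSY otherwise), and udp_append() then hands
 * packets to it instead of queueing them on the socket buffer.
 */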

#ifdef INET
static int
udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;
	int error;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
	INP_WLOCK(inp);
	INP_HASH_WLOCK(pcbinfo);
	error = in_pcbbind(inp, nam, td->td_ucred);
	INP_HASH_WUNLOCK(pcbinfo);
	INP_WUNLOCK(inp);
	return (error);
}

static void
udp_close(struct socket *so)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_close: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_HASH_WLOCK(pcbinfo);
		in_pcbdisconnect(inp);
		inp->inp_laddr.s_addr = INADDR_ANY;
		INP_HASH_WUNLOCK(pcbinfo);
		soisdisconnected(so);
	}
	INP_WUNLOCK(inp);
}

static int
udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;
	struct sockaddr_in *sin;
	int error;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_WUNLOCK(inp);
		return (EISCONN);
	}
	sin = (struct sockaddr_in *)nam;
	error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
	if (error != 0) {
		INP_WUNLOCK(inp);
		return (error);
	}
	INP_HASH_WLOCK(pcbinfo);
	error = in_pcbconnect(inp, nam, td->td_ucred);
	INP_HASH_WUNLOCK(pcbinfo);
	if (error == 0)
		soisconnected(so);
	INP_WUNLOCK(inp);
	return (error);
}

static void
udp_detach(struct socket *so)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;
	struct udpcb *up;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("udp_detach: not disconnected"));
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: up == NULL", __func__));
	inp->inp_ppcb = NULL;
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(pcbinfo);
	udp_discardcb(up);
}

static int
udp_disconnect(struct socket *so)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr == INADDR_ANY) {
		INP_WUNLOCK(inp);
		return (ENOTCONN);
	}
	INP_HASH_WLOCK(pcbinfo);
	in_pcbdisconnect(inp);
	inp->inp_laddr.s_addr = INADDR_ANY;
	INP_HASH_WUNLOCK(pcbinfo);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;		/* XXX */
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_send: inp == NULL"));
	return (udp_output(inp, m, addr, control, td));
}
#endif /* INET */

int
udp_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

#ifdef INET
struct pr_usrreqs udp_usrreqs = {
	.pru_abort =		udp_abort,
	.pru_attach =		udp_attach,
	.pru_bind =		udp_bind,
	.pru_connect =		udp_connect,
	.pru_control =		in_control,
	.pru_detach =		udp_detach,
	.pru_disconnect =	udp_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		udp_send,
	.pru_soreceive =	soreceive_dgram,
	.pru_sosend =		sosend_dgram,
	.pru_shutdown =		udp_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		udp_close,
};
#endif /* INET */