/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.
 * Copyright (c) 2008 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * Copyright (c) 2014 Kevin Lo
 * All rights reserved.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/rss_config.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/udplite.h>
#include <netinet/in_rss.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/esp.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

/*
 * UDP and UDP-Lite protocols implementation.
 * Per RFC 768, August, 1980.
 * Per RFC 3828, July, 2004.
 */

/*
 * BSD 4.2 defaulted the udp checksum to be off.  Turning off udp checksums
 * removes the only data integrity mechanism for packets; malformed packets
 * that would otherwise be discarded due to bad checksums are instead
 * delivered, and may cause problems (especially for NFS data blocks).
 */
VNET_DEFINE(int, udp_cksum) = 1;
SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(udp_cksum), 0, "compute udp checksum");

int	udp_log_in_vain = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &udp_log_in_vain, 0, "Log all incoming UDP packets");

VNET_DEFINE(int, udp_blackhole) = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(udp_blackhole), 0,
    "Do not send port unreachables for refused connects");

u_long	udp_sendspace = 9216;		/* really max datagram size */
SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
    &udp_sendspace, 0, "Maximum outgoing UDP datagram size");

u_long	udp_recvspace = 40 * (1024 +
#ifdef INET6
	    sizeof(struct sockaddr_in6)
#else
	    sizeof(struct sockaddr_in)
#endif
	    );				/* 40 1K datagrams */

SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
    &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");

VNET_DEFINE(struct inpcbhead, udb);		/* from udp_var.h */
VNET_DEFINE(struct inpcbinfo, udbinfo);
VNET_DEFINE(struct inpcbhead, ulitecb);
VNET_DEFINE(struct inpcbinfo, ulitecbinfo);
static VNET_DEFINE(uma_zone_t, udpcb_zone);
#define	V_udpcb_zone		VNET(udpcb_zone)

#ifndef UDBHASHSIZE
#define	UDBHASHSIZE	128
#endif

VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat);		/* from udp_var.h */
VNET_PCPUSTAT_SYSINIT(udpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
    udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(udpstat);
#endif /* VIMAGE */
#ifdef INET
static void	udp_detach(struct socket *so);
static int	udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
		    struct mbuf *, struct thread *);
#endif

#ifdef IPSEC
#ifdef IPSEC_NAT_T
#define	UF_ESPINUDP_ALL	(UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
#ifdef INET
static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
#endif
#endif /* IPSEC_NAT_T */
#endif /* IPSEC */

static void
udp_zone_change(void *tag)
{

	uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
	uma_zone_set_max(V_udpcb_zone, maxsockets);
}

static int
udp_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp;

	inp = mem;
	INP_LOCK_INIT(inp, "inp", "udpinp");
	return (0);
}

static int
udplite_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp;

	inp = mem;
	INP_LOCK_INIT(inp, "inp", "udpliteinp");
	return (0);
}

void
udp_init(void)
{

	/*
	 * For now default to 2-tuple UDP hashing - until the fragment
	 * reassembly code can also update the flowid.
	 *
	 * Once we can calculate the flowid that way and re-establish
	 * a 4-tuple, flip this to 4-tuple.
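	 *
	 * (2-tuple here means hashing on the source and destination
	 * addresses only, not the ports; fragments carry no UDP ports
	 * until reassembly, so an address-only hash keeps them on a
	 * consistent flow.)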
	 */
	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
	    "udp_inpcb", udp_inpcb_init, NULL, 0,
	    IPI_HASHFIELDS_2TUPLE);
	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_udpcb_zone, maxsockets);
	uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

void
udplite_init(void)
{

	in_pcbinfo_init(&V_ulitecbinfo, "udplite", &V_ulitecb, UDBHASHSIZE,
	    UDBHASHSIZE, "udplite_inpcb", udplite_inpcb_init, NULL,
	    0, IPI_HASHFIELDS_2TUPLE);
}

/*
 * Kernel module interface for updating udpstat.  The argument is an index
 * into udpstat treated as an array of u_long.  While this encodes the
 * general layout of udpstat into the caller, it doesn't encode its location,
 * so that future changes to add, for example, per-CPU stats support won't
 * cause binary compatibility problems for kernel modules.
 */
void
kmod_udpstat_inc(int statnum)
{

	counter_u64_add(VNET(udpstat)[statnum], 1);
}

int
udp_newudpcb(struct inpcb *inp)
{
	struct udpcb *up;

	up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
	if (up == NULL)
		return (ENOBUFS);
	inp->inp_ppcb = up;
	return (0);
}

void
udp_discardcb(struct udpcb *up)
{

	uma_zfree(V_udpcb_zone, up);
}

#ifdef VIMAGE
static void
udp_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_udbinfo);
	uma_zdestroy(V_udpcb_zone);
}
VNET_SYSUNINIT(udp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, udp_destroy, NULL);

static void
udplite_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ulitecbinfo);
}
VNET_SYSUNINIT(udplite, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, udplite_destroy,
    NULL);
#endif

#ifdef INET
/*
 * Subroutine of udp_input(), which appends the provided mbuf chain to the
 * passed pcb/socket.  The caller must provide a sockaddr_in via udp_in that
 * contains the source address.  If the socket ends up being an IPv6 socket,
 * udp_append() will convert to a sockaddr_in6 before passing the address
 * into the socket code.
 *
 * In the normal case udp_append() will return 0, indicating that you must
 * unlock the inp.  However, if a tunneling protocol is in place, we increment
 * the inpcb refcnt and unlock the inp; on return from the tunneling protocol
 * we then decrement the reference count.  If in_pcbrele_rlocked() returns 1,
 * indicating the inp is gone, we return that to the caller to tell them *not*
 * to unlock the inp.  In the case of multicast this will cause the
 * distribution to stop (though most tunneling protocols currently known do
 * *not* use multicast).
 */
static int
udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *udp_in)
{
	struct sockaddr *append_sa;
	struct socket *so;
	struct mbuf *opts = NULL;
#ifdef INET6
	struct sockaddr_in6 udp_in6;
#endif
	struct udpcb *up;

	INP_LOCK_ASSERT(inp);

	/*
	 * Engage the tunneling protocol.
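	 * While the tunnel callback runs, the inpcb is unlocked, so take a
	 * reference first; in_pcbrele_rlocked() then tells us whether the
	 * inpcb was freed in the meantime (see the function header above).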
	 */
	up = intoudpcb(inp);
	if (up->u_tun_func != NULL) {
		in_pcbref(inp);
		INP_RUNLOCK(inp);
		(*up->u_tun_func)(n, off, inp, (struct sockaddr *)udp_in,
		    up->u_tun_ctx);
		INP_RLOCK(inp);
		return (in_pcbrele_rlocked(inp));
	}

	off += sizeof(struct udphdr);

#ifdef IPSEC
	/* Check AH/ESP integrity. */
	if (ipsec4_in_reject(n, inp)) {
		m_freem(n);
		return (0);
	}
#ifdef IPSEC_NAT_T
	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
	if (up->u_flags & UF_ESPINUDP_ALL) {	/* IPSec UDP encaps. */
		n = udp4_espdecap(inp, n, off);
		if (n == NULL)			/* Consumed. */
			return (0);
	}
#endif /* IPSEC_NAT_T */
#endif /* IPSEC */
#ifdef MAC
	if (mac_inpcb_check_deliver(inp, n) != 0) {
		m_freem(n);
		return (0);
	}
#endif /* MAC */
	if (inp->inp_flags & INP_CONTROLOPTS ||
	    inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6)
			(void)ip6_savecontrol_v4(inp, n, &opts, NULL);
		else
#endif /* INET6 */
			ip_savecontrol(inp, &opts, ip, n);
	}
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		bzero(&udp_in6, sizeof(udp_in6));
		udp_in6.sin6_len = sizeof(udp_in6);
		udp_in6.sin6_family = AF_INET6;
		in6_sin_2_v4mapsin6(udp_in, &udp_in6);
		append_sa = (struct sockaddr *)&udp_in6;
	} else
#endif /* INET6 */
		append_sa = (struct sockaddr *)udp_in;
	m_adj(n, off);

	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(n);
		if (opts)
			m_freem(opts);
		UDPSTAT_INC(udps_fullsock);
	} else
		sorwakeup_locked(so);
	return (0);
}

int
udp_input(struct mbuf **mp, int *offp, int proto)
{
	struct ip *ip;
	struct udphdr *uh;
	struct ifnet *ifp;
	struct inpcb *inp;
	uint16_t len, ip_len;
	struct inpcbinfo *pcbinfo;
	struct ip save_ip;
	struct sockaddr_in udp_in;
	struct mbuf *m;
	struct m_tag *fwd_tag;
	int cscov_partial, iphlen;

	m = *mp;
	iphlen = *offp;
	ifp = m->m_pkthdr.rcvif;
	*mp = NULL;
	UDPSTAT_INC(udps_ipackets);

	/*
	 * Strip IP options, if any; should skip this, make available to
	 * user, and use on returned packets, but we don't yet have a way to
	 * check the checksum with options still present.
	 */
	if (iphlen > sizeof (struct ip)) {
		ip_stripoptions(m);
		iphlen = sizeof(struct ip);
	}

	/*
	 * Get IP and UDP header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	if (m->m_len < iphlen + sizeof(struct udphdr)) {
		if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == NULL) {
			UDPSTAT_INC(udps_hdrops);
			return (IPPROTO_DONE);
		}
		ip = mtod(m, struct ip *);
	}
	uh = (struct udphdr *)((caddr_t)ip + iphlen);
	cscov_partial = (proto == IPPROTO_UDPLITE) ? 1 : 0;

	/*
	 * Destination port of 0 is illegal, based on RFC768.
	 */
	if (uh->uh_dport == 0)
		goto badunlocked;

	/*
	 * Construct sockaddr format source address.  Stuff source address
	 * and datagram in user buffer.
	 */
	bzero(&udp_in, sizeof(udp_in));
	udp_in.sin_len = sizeof(udp_in);
	udp_in.sin_family = AF_INET;
	udp_in.sin_port = uh->uh_sport;
	udp_in.sin_addr = ip->ip_src;

	/*
	 * Make mbuf data length reflect UDP length.  If not enough data to
	 * reflect UDP length, drop.
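	 * Note that for UDP-Lite uh_ulen carries the checksum coverage
	 * rather than the datagram length, so zero (or a value equal to the
	 * IP payload length) selects full coverage below.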
	 */
	len = ntohs((u_short)uh->uh_ulen);
	ip_len = ntohs(ip->ip_len) - iphlen;
	if (proto == IPPROTO_UDPLITE && (len == 0 || len == ip_len)) {
		/* Zero means checksum over the complete packet. */
		if (len == 0)
			len = ip_len;
		cscov_partial = 0;
	}
	if (ip_len != len) {
		if (len > ip_len || len < sizeof(struct udphdr)) {
			UDPSTAT_INC(udps_badlen);
			goto badunlocked;
		}
		if (proto == IPPROTO_UDP)
			m_adj(m, len - ip_len);
	}

	/*
	 * Save a copy of the IP header in case we want to restore it for
	 * sending an ICMP error message in response.
	 */
	if (!V_udp_blackhole)
		save_ip = *ip;
	else
		memset(&save_ip, 0, sizeof(save_ip));

	/*
	 * Checksum extended UDP header and data.
	 */
	if (uh->uh_sum) {
		u_short uh_sum;

		if ((m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
		    !cscov_partial) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				uh_sum = m->m_pkthdr.csum_data;
			else
				uh_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + proto));
			uh_sum ^= 0xffff;
		} else {
			char b[9];

			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
			bzero(((struct ipovly *)ip)->ih_x1, 9);
			((struct ipovly *)ip)->ih_len = (proto == IPPROTO_UDP) ?
			    uh->uh_ulen : htons(ip_len);
			uh_sum = in_cksum(m, len + sizeof (struct ip));
			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
		}
		if (uh_sum) {
			UDPSTAT_INC(udps_badsum);
			m_freem(m);
			return (IPPROTO_DONE);
		}
	} else {
		if (proto == IPPROTO_UDP) {
			UDPSTAT_INC(udps_nosum);
		} else {
			/* UDPLite requires a checksum */
			/* XXX: What is the right UDPLite MIB counter here? */
			m_freem(m);
			return (IPPROTO_DONE);
		}
	}

	pcbinfo = udp_get_inpcbinfo(proto);
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
	    in_broadcast(ip->ip_dst, ifp)) {
		struct inpcb *last;
		struct inpcbhead *pcblist;
		struct ip_moptions *imo;

		INP_INFO_RLOCK(pcbinfo);
		pcblist = udp_get_pcblist(proto);
		last = NULL;
		LIST_FOREACH(inp, pcblist, inp_list) {
			if (inp->inp_lport != uh->uh_dport)
				continue;
#ifdef INET6
			if ((inp->inp_vflag & INP_IPV4) == 0)
				continue;
#endif
			if (inp->inp_laddr.s_addr != INADDR_ANY &&
			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
				continue;
			if (inp->inp_faddr.s_addr != INADDR_ANY &&
			    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
				continue;
			if (inp->inp_fport != 0 &&
			    inp->inp_fport != uh->uh_sport)
				continue;

			INP_RLOCK(inp);

			/*
			 * XXXRW: Because we weren't holding either the inpcb
			 * or the hash lock when we checked for a match
			 * before, we should probably recheck now that the
			 * inpcb lock is held.
			 */

			/*
			 * Handle socket delivery policy for any-source
			 * and source-specific multicast. [RFC3678]
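			 *
			 * imo_multi_filter() compares this datagram's group
			 * and source against the socket's memberships and
			 * per-source include/exclude filters.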
			 */
			imo = inp->inp_moptions;
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
				struct sockaddr_in group;
				int blocked;
				if (imo == NULL) {
					INP_RUNLOCK(inp);
					continue;
				}
				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(imo, ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&udp_in);
				if (blocked != MCAST_PASS) {
					if (blocked == MCAST_NOTGMEMBER)
						IPSTAT_INC(ips_notmember);
					if (blocked == MCAST_NOTSMEMBER ||
					    blocked == MCAST_MUTED)
						UDPSTAT_INC(udps_filtermcast);
					INP_RUNLOCK(inp);
					continue;
				}
			}
			if (last != NULL) {
				struct mbuf *n;

				if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
					UDP_PROBE(receive, NULL, last, ip,
					    last, uh);
					if (udp_append(last, ip, n, iphlen,
					    &udp_in)) {
						goto inp_lost;
					}
				}
				INP_RUNLOCK(last);
			}
			last = inp;
			/*
			 * Don't look for additional matches if this one does
			 * not have either the SO_REUSEPORT or SO_REUSEADDR
			 * socket options set.  This heuristic avoids
			 * searching through all pcbs in the common case of a
			 * non-shared port.  It assumes that an application
			 * will never clear these options after setting them.
			 */
			if ((last->inp_socket->so_options &
			    (SO_REUSEPORT|SO_REUSEADDR)) == 0)
				break;
		}

		if (last == NULL) {
			/*
			 * No matching pcb found; discard datagram.  (No need
			 * to send an ICMP Port Unreachable for a broadcast
			 * or multicast datagram.)
			 */
			UDPSTAT_INC(udps_noportbcast);
			if (inp)
				INP_RUNLOCK(inp);
			INP_INFO_RUNLOCK(pcbinfo);
			goto badunlocked;
		}
		UDP_PROBE(receive, NULL, last, ip, last, uh);
		if (udp_append(last, ip, m, iphlen, &udp_in) == 0)
			INP_RUNLOCK(last);
	inp_lost:
		INP_INFO_RUNLOCK(pcbinfo);
		return (IPPROTO_DONE);
	}

	/*
	 * Locate pcb for datagram.
	 */

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if ((m->m_flags & M_IP_NEXTHOP) &&
	    (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag + 1);

		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
		    ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(pcbinfo, ip->ip_src,
			    uh->uh_sport, next_hop->sin_addr,
			    next_hop->sin_port ? htons(next_hop->sin_port) :
			    uh->uh_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_RLOCKPCB, ifp);
		}
		/* Remove the tag from the packet. We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
		m->m_flags &= ~M_IP_NEXTHOP;
	} else
		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
		    ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
		    INPLOOKUP_RLOCKPCB, ifp, m);
	if (inp == NULL) {
		if (udp_log_in_vain) {
			char buf[4*sizeof "123"];

			strcpy(buf, inet_ntoa(ip->ip_dst));
			log(LOG_INFO,
			    "Connection attempt to UDP %s:%d from %s:%d\n",
			    buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
			    ntohs(uh->uh_sport));
		}
		UDPSTAT_INC(udps_noport);
		if (m->m_flags & (M_BCAST | M_MCAST)) {
			UDPSTAT_INC(udps_noportbcast);
			goto badunlocked;
		}
		if (V_udp_blackhole)
			goto badunlocked;
		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
			goto badunlocked;
		*ip = save_ip;
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
		return (IPPROTO_DONE);
	}

	/*
	 * Check the minimum TTL for socket.
	 */
	INP_RLOCK_ASSERT(inp);
	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
		INP_RUNLOCK(inp);
		m_freem(m);
		return (IPPROTO_DONE);
	}
	if (cscov_partial) {
		struct udpcb *up;

		up = intoudpcb(inp);
		if (up->u_rxcslen == 0 || up->u_rxcslen > len) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (IPPROTO_DONE);
		}
	}

	UDP_PROBE(receive, NULL, inp, ip, inp, uh);
	if (udp_append(inp, ip, m, iphlen, &udp_in) == 0)
		INP_RUNLOCK(inp);
	return (IPPROTO_DONE);

badunlocked:
	m_freem(m);
	return (IPPROTO_DONE);
}
#endif /* INET */

/*
 * Notify a udp user of an asynchronous error; just wake up so that they can
 * collect error status.
 */
struct inpcb *
udp_notify(struct inpcb *inp, int errno)
{

	/*
	 * While udp_ctlinput() always calls udp_notify() with a read lock
	 * when invoking it directly, in_pcbnotifyall() currently uses write
	 * locks due to sharing code with TCP.  For now, accept either a read
	 * or a write lock, but a read lock is sufficient.
	 */
	INP_LOCK_ASSERT(inp);
	if ((errno == EHOSTUNREACH || errno == ENETUNREACH ||
	     errno == EHOSTDOWN) && inp->inp_route.ro_rt) {
		RTFREE(inp->inp_route.ro_rt);
		inp->inp_route.ro_rt = (struct rtentry *)NULL;
	}

	inp->inp_socket->so_error = errno;
	sorwakeup(inp->inp_socket);
	sowwakeup(inp->inp_socket);
	return (inp);
}

#ifdef INET
static void
udp_common_ctlinput(int cmd, struct sockaddr *sa, void *vip,
    struct inpcbinfo *pcbinfo)
{
	struct ip *ip = vip;
	struct udphdr *uh;
	struct in_addr faddr;
	struct inpcb *inp;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (PRC_IS_REDIRECT(cmd)) {
		/* signal EHOSTDOWN, as it flushes the cached route */
		in_pcbnotifyall(&V_udbinfo, faddr, EHOSTDOWN, udp_notify);
		return;
	}

	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 *
	 * XXX: We never get this from ICMP, otherwise it makes an excellent
	 * DoS attack on machines with many connections.
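	 *
	 * Setting ip to NULL below sends us down the in_pcbnotifyall()
	 * path, which walks the entire PCB list and notifies every socket
	 * connected to faddr.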
	 */
	if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		inp = in_pcblookup(pcbinfo, faddr, uh->uh_dport,
		    ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
		if (inp != NULL) {
			INP_RLOCK_ASSERT(inp);
			if (inp->inp_socket != NULL) {
				udp_notify(inp, inetctlerrmap[cmd]);
			}
			INP_RUNLOCK(inp);
		} else {
			inp = in_pcblookup(pcbinfo, faddr, uh->uh_dport,
			    ip->ip_src, uh->uh_sport,
			    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
			if (inp != NULL) {
				struct udpcb *up;

				up = intoudpcb(inp);
				if (up->u_icmp_func != NULL) {
					INP_RUNLOCK(inp);
					(*up->u_icmp_func)(cmd, sa, vip, up->u_tun_ctx);
				} else {
					INP_RUNLOCK(inp);
				}
			}
		}
	} else
		in_pcbnotifyall(pcbinfo, faddr, inetctlerrmap[cmd],
		    udp_notify);
}

void
udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{

	return (udp_common_ctlinput(cmd, sa, vip, &V_udbinfo));
}

void
udplite_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{

	return (udp_common_ctlinput(cmd, sa, vip, &V_ulitecbinfo));
}
#endif /* INET */

static int
udp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_udbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
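	 *
	 * Snapshot the generation count and pcb count under the info lock;
	 * the count sizes the copy-out buffer and the generation lets the
	 * consumer detect changes made while the list was being built.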
	 */
	INP_INFO_RLOCK(&V_udbinfo);
	gencnt = V_udbinfo.ipi_gencnt;
	n = V_udbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_udbinfo);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
	    + n * sizeof(struct xinpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == NULL)
		return (ENOMEM);

	INP_INFO_RLOCK(&V_udbinfo);
	for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_udbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			xi.xi_inp.inp_gencnt = inp->inp_gencnt;
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_udbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_udbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
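		 *
		 * (Consumers such as netstat(1) can compare this trailing
		 * xinpgen with the leading one emitted above.)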
		 */
		INP_INFO_RLOCK(&V_udbinfo);
		xig.xig_gen = V_udbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_udbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_udbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    udp_pcblist, "S,xinpcb", "List of active UDP sockets");

#ifdef INET
static int
udp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port,
	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
	if (inp != NULL) {
		INP_RLOCK_ASSERT(inp);
		if (inp->inp_socket == NULL)
			error = ENOENT;
		if (error == 0)
			error = cr_canseeinpcb(req->td->td_ucred, inp);
		if (error == 0)
			cru2x(inp->inp_cred, &xuc);
		INP_RUNLOCK(inp);
	} else
		error = ENOENT;
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
#endif /* INET */

int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp;
	struct udpcb *up;
	int isudplite, error, optval;

	error = 0;
	isudplite = (so->so_proto->pr_protocol == IPPROTO_UDPLITE) ? 1 : 0;
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
	INP_WLOCK(inp);
	if (sopt->sopt_level != so->so_proto->pr_protocol) {
#ifdef INET6
		if (INP_CHECK_SOCKAF(so, AF_INET6)) {
			INP_WUNLOCK(inp);
			error = ip6_ctloutput(so, sopt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			INP_WUNLOCK(inp);
			error = ip_ctloutput(so, sopt);
		}
#endif
		return (error);
	}

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case UDP_ENCAP:
			INP_WUNLOCK(inp);
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			inp = sotoinpcb(so);
			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
			INP_WLOCK(inp);
#ifdef IPSEC_NAT_T
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
#endif
			switch (optval) {
			case 0:
				/* Clear all UDP encap. */
#ifdef IPSEC_NAT_T
				up->u_flags &= ~UF_ESPINUDP_ALL;
#endif
				break;
#ifdef IPSEC_NAT_T
			case UDP_ENCAP_ESPINUDP:
			case UDP_ENCAP_ESPINUDP_NON_IKE:
				up->u_flags &= ~UF_ESPINUDP_ALL;
				if (optval == UDP_ENCAP_ESPINUDP)
					up->u_flags |= UF_ESPINUDP;
				else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
					up->u_flags |= UF_ESPINUDP_NON_IKE;
				break;
#endif
			default:
				error = EINVAL;
				break;
			}
			INP_WUNLOCK(inp);
			break;
		case UDPLITE_SEND_CSCOV:
		case UDPLITE_RECV_CSCOV:
			if (!isudplite) {
				INP_WUNLOCK(inp);
				error = ENOPROTOOPT;
				break;
			}
			INP_WUNLOCK(inp);
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error != 0)
				break;
			inp = sotoinpcb(so);
			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
			INP_WLOCK(inp);
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
			if ((optval != 0 && optval < 8) || (optval > 65535)) {
				INP_WUNLOCK(inp);
				error = EINVAL;
				break;
			}
			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
				up->u_txcslen = optval;
			else
				up->u_rxcslen = optval;
			INP_WUNLOCK(inp);
			break;
		default:
			INP_WUNLOCK(inp);
			error = ENOPROTOOPT;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
#ifdef IPSEC_NAT_T
		case UDP_ENCAP:
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
			optval = up->u_flags & UF_ESPINUDP_ALL;
			INP_WUNLOCK(inp);
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;
#endif
		case UDPLITE_SEND_CSCOV:
		case UDPLITE_RECV_CSCOV:
			if (!isudplite) {
				INP_WUNLOCK(inp);
				error = ENOPROTOOPT;
				break;
			}
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
				optval = up->u_txcslen;
			else
				optval = up->u_rxcslen;
			INP_WUNLOCK(inp);
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;
		default:
			INP_WUNLOCK(inp);
			error = ENOPROTOOPT;
			break;
		}
		break;
	}
	return (error);
}

#ifdef INET
#define	UH_WLOCKED	2
#define	UH_RLOCKED	1
#define	UH_UNLOCKED	0
static int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *td)
{
	struct udpiphdr *ui;
	int len = m->m_pkthdr.len;
	struct in_addr faddr, laddr;
	struct cmsghdr *cm;
	struct inpcbinfo *pcbinfo;
	struct sockaddr_in *sin, src;
	int cscov_partial = 0;
	int error = 0;
	int ipflags;
	u_short fport, lport;
	int unlock_udbinfo, unlock_inp;
	u_char tos;
	uint8_t pr;
	uint16_t cscov = 0;
	uint32_t flowid = 0;
	uint8_t flowtype = M_HASHTYPE_NONE;

	/*
	 * udp_output() may need to temporarily bind or connect the current
	 * inpcb.  As such, we don't know up front whether we will need the
	 * pcbinfo lock or not.  Do any work to decide what is needed up
	 * front before acquiring any locks.
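	 *
	 * The UH_{UNLOCKED,RLOCKED,WLOCKED} values recorded in unlock_inp
	 * and unlock_udbinfo track which inpcb and pcbinfo hash locks were
	 * taken, so the exit paths release exactly what was acquired.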
	 */
	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
		if (control)
			m_freem(control);
		m_freem(m);
		return (EMSGSIZE);
	}

	src.sin_family = 0;
	sin = (struct sockaddr_in *)addr;
	if (sin == NULL ||
	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
		INP_WLOCK(inp);
		unlock_inp = UH_WLOCKED;
	} else {
		INP_RLOCK(inp);
		unlock_inp = UH_RLOCKED;
	}
	tos = inp->inp_ip_tos;
	if (control != NULL) {
		/*
		 * XXX: Currently, we assume all the optional information is
		 * stored in a single mbuf.
		 */
		if (control->m_next) {
			if (unlock_inp == UH_WLOCKED)
				INP_WUNLOCK(inp);
			else
				INP_RUNLOCK(inp);
			m_freem(control);
			m_freem(m);
			return (EINVAL);
		}
		for (; control->m_len > 0;
		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
			cm = mtod(control, struct cmsghdr *);
			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
			    || cm->cmsg_len > control->m_len) {
				error = EINVAL;
				break;
			}
			if (cm->cmsg_level != IPPROTO_IP)
				continue;

			switch (cm->cmsg_type) {
			case IP_SENDSRCADDR:
				if (cm->cmsg_len !=
				    CMSG_LEN(sizeof(struct in_addr))) {
					error = EINVAL;
					break;
				}
				bzero(&src, sizeof(src));
				src.sin_family = AF_INET;
				src.sin_len = sizeof(src);
				src.sin_port = inp->inp_lport;
				src.sin_addr =
				    *(struct in_addr *)CMSG_DATA(cm);
				break;

			case IP_TOS:
				if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
					error = EINVAL;
					break;
				}
				tos = *(u_char *)CMSG_DATA(cm);
				break;

			case IP_FLOWID:
				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
					error = EINVAL;
					break;
				}
				flowid = *(uint32_t *) CMSG_DATA(cm);
				break;

			case IP_FLOWTYPE:
				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
					error = EINVAL;
					break;
				}
				flowtype = *(uint32_t *) CMSG_DATA(cm);
				break;

#ifdef RSS
			case IP_RSSBUCKETID:
				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
					error = EINVAL;
					break;
				}
				/* This is just a placeholder for now */
				break;
#endif /* RSS */
			default:
				error = ENOPROTOOPT;
				break;
			}
			if (error)
				break;
		}
		m_freem(control);
	}
	if (error) {
		if (unlock_inp == UH_WLOCKED)
			INP_WUNLOCK(inp);
		else
			INP_RUNLOCK(inp);
		m_freem(m);
		return (error);
	}

	/*
	 * Depending on whether or not the application has bound or connected
	 * the socket, we may have to do varying levels of work.  The optimal
	 * case is for a connected UDP socket, as a global lock isn't
	 * required at all.
	 *
	 * In order to decide which we need, we require stability of the
	 * inpcb binding, which we ensure by acquiring a read lock on the
	 * inpcb.  This doesn't strictly follow the lock order, so we play
	 * the trylock and retry game; note that we may end up with more
	 * conservative locks than required the second time around, so later
	 * assertions have to accept that.  Further analysis of the number of
	 * misses under contention is required.
	 *
	 * XXXRW: Check that hash locking update here is correct.
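	 *
	 * In short: a send that may have to select a local address or port
	 * takes the pcbinfo hash write lock; other sends with an explicit
	 * destination or IP_SENDSRCADDR take it read; a connected socket
	 * sending without an explicit address needs no hash lock at all.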
	 */
	pr = inp->inp_socket->so_proto->pr_protocol;
	pcbinfo = udp_get_inpcbinfo(pr);
	sin = (struct sockaddr_in *)addr;
	if (sin != NULL &&
	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
		INP_HASH_WLOCK(pcbinfo);
		unlock_udbinfo = UH_WLOCKED;
	} else if ((sin != NULL && (
	    (sin->sin_addr.s_addr == INADDR_ANY) ||
	    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
	    (inp->inp_laddr.s_addr == INADDR_ANY) ||
	    (inp->inp_lport == 0))) ||
	    (src.sin_family == AF_INET)) {
		INP_HASH_RLOCK(pcbinfo);
		unlock_udbinfo = UH_RLOCKED;
	} else
		unlock_udbinfo = UH_UNLOCKED;

	/*
	 * If the IP_SENDSRCADDR control message was specified, override the
	 * source address for this datagram.  Its use is invalidated if the
	 * address thus specified is incomplete or clobbers other inpcbs.
	 */
	laddr = inp->inp_laddr;
	lport = inp->inp_lport;
	if (src.sin_family == AF_INET) {
		INP_HASH_LOCK_ASSERT(pcbinfo);
		if ((lport == 0) ||
		    (laddr.s_addr == INADDR_ANY &&
		     src.sin_addr.s_addr == INADDR_ANY)) {
			error = EINVAL;
			goto release;
		}
		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
		    &laddr.s_addr, &lport, td->td_ucred);
		if (error)
			goto release;
	}

	/*
	 * If a UDP socket has been connected, then a local address/port will
	 * have been selected and bound.
	 *
	 * If a UDP socket has not been connected to, then an explicit
	 * destination address must be used, in which case a local
	 * address/port may not have been selected and bound.
	 */
	if (sin != NULL) {
		INP_LOCK_ASSERT(inp);
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			error = EISCONN;
			goto release;
		}

		/*
		 * Jail may rewrite the destination address, so let it do
		 * that before we use it.
		 */
		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
		if (error)
			goto release;

		/*
		 * If a local address or port hasn't yet been selected, or if
		 * the destination address needs to be rewritten due to using
		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
		 * to do the heavy lifting.  Once a port is selected, we
		 * commit the binding back to the socket; we also commit the
		 * binding of the address if in jail.
		 *
		 * If we already have a valid binding and we're not
		 * requesting a destination address rewrite, use a fast path.
		 */
		if (inp->inp_laddr.s_addr == INADDR_ANY ||
		    inp->inp_lport == 0 ||
		    sin->sin_addr.s_addr == INADDR_ANY ||
		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
			INP_HASH_LOCK_ASSERT(pcbinfo);
			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
			    &lport, &faddr.s_addr, &fport, NULL,
			    td->td_ucred);
			if (error)
				goto release;

			/*
			 * XXXRW: Why not commit the port if the address is
			 * !INADDR_ANY?
			 */
			/* Commit the local port if newly assigned. */
			if (inp->inp_laddr.s_addr == INADDR_ANY &&
			    inp->inp_lport == 0) {
				INP_WLOCK_ASSERT(inp);
				INP_HASH_WLOCK_ASSERT(pcbinfo);
				/*
				 * Remember addr if jailed, to prevent
				 * rebinding.
				 */
				if (prison_flag(td->td_ucred, PR_IP4))
					inp->inp_laddr = laddr;
				inp->inp_lport = lport;
				if (in_pcbinshash(inp) != 0) {
					inp->inp_lport = 0;
					error = EAGAIN;
					goto release;
				}
				inp->inp_flags |= INP_ANONPORT;
			}
		} else {
			faddr = sin->sin_addr;
			fport = sin->sin_port;
		}
	} else {
		INP_LOCK_ASSERT(inp);
		faddr = inp->inp_faddr;
		fport = inp->inp_fport;
		if (faddr.s_addr == INADDR_ANY) {
			error = ENOTCONN;
			goto release;
		}
	}

	/*
	 * Calculate data length and get a mbuf for UDP, IP, and possible
	 * link-layer headers.  Immediately slide the data pointer back
	 * forward since we won't use that space at this layer.
	 */
	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto release;
	}
	m->m_data += max_linkhdr;
	m->m_len -= max_linkhdr;
	m->m_pkthdr.len -= max_linkhdr;

	/*
	 * Fill in the mbuf with the extended UDP header; addresses and
	 * length are put into network byte order.
	 */
	ui = mtod(m, struct udpiphdr *);
	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
	ui->ui_pr = pr;
	ui->ui_src = laddr;
	ui->ui_dst = faddr;
	ui->ui_sport = lport;
	ui->ui_dport = fport;
	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
	if (pr == IPPROTO_UDPLITE) {
		struct udpcb *up;
		uint16_t plen;

		up = intoudpcb(inp);
		cscov = up->u_txcslen;
		plen = (u_short)len + sizeof(struct udphdr);
		if (cscov >= plen)
			cscov = 0;
		ui->ui_len = htons(plen);
		ui->ui_ulen = htons(cscov);
		/*
		 * For UDP-Lite, checksum coverage length of zero means
		 * the entire UDPLite packet is covered by the checksum.
		 */
		cscov_partial = (cscov == 0) ? 0 : 1;
	} else
		ui->ui_v = IPVERSION << 4;

	/*
	 * Set the Don't Fragment bit in the IP header.
	 */
	if (inp->inp_flags & INP_DONTFRAG) {
		struct ip *ip;

		ip = (struct ip *)&ui->ui_i;
		ip->ip_off |= htons(IP_DF);
	}

	ipflags = 0;
	if (inp->inp_socket->so_options & SO_DONTROUTE)
		ipflags |= IP_ROUTETOIF;
	if (inp->inp_socket->so_options & SO_BROADCAST)
		ipflags |= IP_ALLOWBROADCAST;
	if (inp->inp_flags & INP_ONESBCAST)
		ipflags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	/*
	 * Set up checksum and output datagram.
	 */
	ui->ui_sum = 0;
	if (pr == IPPROTO_UDPLITE) {
		if (inp->inp_flags & INP_ONESBCAST)
			faddr.s_addr = INADDR_BROADCAST;
		if (cscov_partial) {
			if ((ui->ui_sum = in_cksum(m, sizeof(struct ip) + cscov)) == 0)
				ui->ui_sum = 0xffff;
		} else {
			if ((ui->ui_sum = in_cksum(m, sizeof(struct udpiphdr) + len)) == 0)
				ui->ui_sum = 0xffff;
		}
	} else if (V_udp_cksum) {
		if (inp->inp_flags & INP_ONESBCAST)
			faddr.s_addr = INADDR_BROADCAST;
		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
		    htons((u_short)len + sizeof(struct udphdr) + pr));
		m->m_pkthdr.csum_flags = CSUM_UDP;
		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
	}
	((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
	((struct ip *)ui)->ip_tos = tos;		/* XXX */
	UDPSTAT_INC(udps_opackets);

	/*
	 * Setup flowid / RSS information for outbound socket.
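	 *
	 * A flowid/flowtype supplied by the application via the IP_FLOWID
	 * and IP_FLOWTYPE control messages (parsed above) is used as-is.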
1497 * 1498 * Once the UDP code decides to set a flowid some other way, 1499 * this allows the flowid to be overridden by userland. 1500 */ 1501 if (flowtype != M_HASHTYPE_NONE) { 1502 m->m_pkthdr.flowid = flowid; 1503 M_HASHTYPE_SET(m, flowtype); 1504 #ifdef RSS 1505 } else { 1506 uint32_t hash_val, hash_type; 1507 /* 1508 * Calculate an appropriate RSS hash for UDP and 1509 * UDP Lite. 1510 * 1511 * The called function will take care of figuring out 1512 * whether a 2-tuple or 4-tuple hash is required based 1513 * on the currently configured scheme. 1514 * 1515 * Later later on connected socket values should be 1516 * cached in the inpcb and reused, rather than constantly 1517 * re-calculating it. 1518 * 1519 * UDP Lite is a different protocol number and will 1520 * likely end up being hashed as a 2-tuple until 1521 * RSS / NICs grow UDP Lite protocol awareness. 1522 */ 1523 if (rss_proto_software_hash_v4(faddr, laddr, fport, lport, 1524 pr, &hash_val, &hash_type) == 0) { 1525 m->m_pkthdr.flowid = hash_val; 1526 M_HASHTYPE_SET(m, hash_type); 1527 } 1528 #endif 1529 } 1530 1531 #ifdef RSS 1532 /* 1533 * Don't override with the inp cached flowid value. 1534 * 1535 * Depending upon the kind of send being done, the inp 1536 * flowid/flowtype values may actually not be appropriate 1537 * for this particular socket send. 1538 * 1539 * We should either leave the flowid at zero (which is what is 1540 * currently done) or set it to some software generated 1541 * hash value based on the packet contents. 1542 */ 1543 ipflags |= IP_NODEFAULTFLOWID; 1544 #endif /* RSS */ 1545 1546 if (unlock_udbinfo == UH_WLOCKED) 1547 INP_HASH_WUNLOCK(pcbinfo); 1548 else if (unlock_udbinfo == UH_RLOCKED) 1549 INP_HASH_RUNLOCK(pcbinfo); 1550 UDP_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u); 1551 error = ip_output(m, inp->inp_options, 1552 (unlock_inp == UH_WLOCKED ? &inp->inp_route : NULL), ipflags, 1553 inp->inp_moptions, inp); 1554 if (unlock_inp == UH_WLOCKED) 1555 INP_WUNLOCK(inp); 1556 else 1557 INP_RUNLOCK(inp); 1558 return (error); 1559 1560 release: 1561 if (unlock_udbinfo == UH_WLOCKED) { 1562 INP_HASH_WUNLOCK(pcbinfo); 1563 INP_WUNLOCK(inp); 1564 } else if (unlock_udbinfo == UH_RLOCKED) { 1565 INP_HASH_RUNLOCK(pcbinfo); 1566 INP_RUNLOCK(inp); 1567 } else 1568 INP_RUNLOCK(inp); 1569 m_freem(m); 1570 return (error); 1571 } 1572 1573 1574 #if defined(IPSEC) && defined(IPSEC_NAT_T) 1575 /* 1576 * Potentially decap ESP in UDP frame. Check for an ESP header 1577 * and optional marker; if present, strip the UDP header and 1578 * push the result through IPSec. 1579 * 1580 * Returns mbuf to be processed (potentially re-allocated) or 1581 * NULL if consumed and/or processed. 1582 */ 1583 static struct mbuf * 1584 udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off) 1585 { 1586 size_t minlen, payload, skip, iphlen; 1587 caddr_t data; 1588 struct udpcb *up; 1589 struct m_tag *tag; 1590 struct udphdr *udphdr; 1591 struct ip *ip; 1592 1593 INP_RLOCK_ASSERT(inp); 1594 1595 /* 1596 * Pull up data so the longest case is contiguous: 1597 * IP/UDP hdr + non ESP marker + ESP hdr. 1598 */ 1599 minlen = off + sizeof(uint64_t) + sizeof(struct esp); 1600 if (minlen > m->m_pkthdr.len) 1601 minlen = m->m_pkthdr.len; 1602 if ((m = m_pullup(m, minlen)) == NULL) { 1603 IPSECSTAT_INC(ips_in_inval); 1604 return (NULL); /* Bypass caller processing. */ 1605 } 1606 data = mtod(m, caddr_t); /* Points to ip header. */ 1607 payload = m->m_len - off; /* Size of payload. 

	if (payload == 1 && data[off] == '\xff')
		return (m);		/* NB: keepalive packet, no decap. */

	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
	    ("u_flags 0x%x", up->u_flags));

	/*
	 * Check that the payload is large enough to hold an
	 * ESP header and compute the amount of data to remove.
	 *
	 * NB: the caller has already done a pullup for us.
	 * XXX can we assume alignment and eliminate bcopys?
	 */
	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
		/*
		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
		 * possible AH mode non-IKE marker+non-ESP marker
		 * from draft-ietf-ipsec-udp-encaps-00.txt.
		 */
		uint64_t marker;

		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
			return (m);	/* NB: no decap. */
		bcopy(data + off, &marker, sizeof(uint64_t));
		if (marker != 0)	/* Non-IKE marker. */
			return (m);	/* NB: no decap. */
		skip = sizeof(uint64_t) + sizeof(struct udphdr);
	} else {
		uint32_t spi;

		if (payload <= sizeof(struct esp)) {
			IPSECSTAT_INC(ips_in_inval);
			m_freem(m);
			return (NULL);	/* Discard. */
		}
		bcopy(data + off, &spi, sizeof(uint32_t));
		if (spi == 0)		/* Non-ESP marker. */
			return (m);	/* NB: no decap. */
		skip = sizeof(struct udphdr);
	}

	/*
	 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
	 * the UDP ports.  This is required if we want to select
	 * the right SPD for multiple hosts behind same NAT.
	 *
	 * NB: ports are maintained in network byte order everywhere
	 *     in the NAT-T code.
	 */
	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
	    2 * sizeof(uint16_t), M_NOWAIT);
	if (tag == NULL) {
		IPSECSTAT_INC(ips_in_nomem);
		m_freem(m);
		return (NULL);		/* Discard. */
	}
	iphlen = off - sizeof(struct udphdr);
	udphdr = (struct udphdr *)(data + iphlen);
	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
	m_tag_prepend(m, tag);

	/*
	 * Remove the UDP header (and possibly the non ESP marker)
	 * IP header length is iphlen
	 * Before:
	 *   <--- off --->
	 *   +----+------+-----+
	 *   | IP |  UDP | ESP |
	 *   +----+------+-----+
	 *        <-skip->
	 * After:
	 *   +----+-----+
	 *   | IP | ESP |
	 *   +----+-----+
	 *   <-skip->
	 */
	ovbcopy(data, data + skip, iphlen);
	m_adj(m, skip);

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(ntohs(ip->ip_len) - skip);
	ip->ip_p = IPPROTO_ESP;

	/*
	 * We cannot yet update the cksums so clear any
	 * h/w cksum flags as they are no longer valid.
	 */
	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);

	(void) ipsec_common_input(m, iphlen, offsetof(struct ip, ip_p),
	    AF_INET, ip->ip_p);
	return (NULL);			/* NB: consumed, bypass processing. */
}
#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */

static void
udp_abort(struct socket *so)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_HASH_WLOCK(pcbinfo);
		in_pcbdisconnect(inp);
		inp->inp_laddr.s_addr = INADDR_ANY;
		INP_HASH_WUNLOCK(pcbinfo);
		soisdisconnected(so);
	}
	INP_WUNLOCK(inp);
}

static int
udp_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;
	int error;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
	error = soreserve(so, udp_sendspace, udp_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(pcbinfo);
	error = in_pcballoc(so, pcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(pcbinfo);
		return (error);
	}

	inp = sotoinpcb(so);
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_ttl = V_ip_defttl;

	error = udp_newudpcb(inp);
	if (error) {
		in_pcbdetach(inp);
		in_pcbfree(inp);
		INP_INFO_WUNLOCK(pcbinfo);
		return (error);
	}

	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
	return (0);
}
#endif /* INET */

int
udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f, udp_tun_icmp_t i, void *ctx)
{
	struct inpcb *inp;
	struct udpcb *up;

	KASSERT(so->so_type == SOCK_DGRAM,
	    ("udp_set_kernel_tunneling: !dgram"));
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
	INP_WLOCK(inp);
	up = intoudpcb(inp);
	if ((up->u_tun_func != NULL) ||
	    (up->u_icmp_func != NULL)) {
		INP_WUNLOCK(inp);
		return (EBUSY);
	}
	up->u_tun_func = f;
	up->u_icmp_func = i;
	up->u_tun_ctx = ctx;
	INP_WUNLOCK(inp);
	return (0);
}

#ifdef INET
static int
udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;
	int error;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
	INP_WLOCK(inp);
	INP_HASH_WLOCK(pcbinfo);
	error = in_pcbbind(inp, nam, td->td_ucred);
	INP_HASH_WUNLOCK(pcbinfo);
	INP_WUNLOCK(inp);
	return (error);
}

static void
udp_close(struct socket *so)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_close: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_HASH_WLOCK(pcbinfo);
		in_pcbdisconnect(inp);
		inp->inp_laddr.s_addr = INADDR_ANY;
		INP_HASH_WUNLOCK(pcbinfo);
		soisdisconnected(so);
	}
	INP_WUNLOCK(inp);
}

static int
udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp;
	struct inpcbinfo *pcbinfo;
	struct sockaddr_in *sin;
	int error;

	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
inp == NULL")); 1841 INP_WLOCK(inp); 1842 if (inp->inp_faddr.s_addr != INADDR_ANY) { 1843 INP_WUNLOCK(inp); 1844 return (EISCONN); 1845 } 1846 sin = (struct sockaddr_in *)nam; 1847 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr); 1848 if (error != 0) { 1849 INP_WUNLOCK(inp); 1850 return (error); 1851 } 1852 INP_HASH_WLOCK(pcbinfo); 1853 error = in_pcbconnect(inp, nam, td->td_ucred); 1854 INP_HASH_WUNLOCK(pcbinfo); 1855 if (error == 0) 1856 soisconnected(so); 1857 INP_WUNLOCK(inp); 1858 return (error); 1859 } 1860 1861 static void 1862 udp_detach(struct socket *so) 1863 { 1864 struct inpcb *inp; 1865 struct inpcbinfo *pcbinfo; 1866 struct udpcb *up; 1867 1868 pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol); 1869 inp = sotoinpcb(so); 1870 KASSERT(inp != NULL, ("udp_detach: inp == NULL")); 1871 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY, 1872 ("udp_detach: not disconnected")); 1873 INP_INFO_WLOCK(pcbinfo); 1874 INP_WLOCK(inp); 1875 up = intoudpcb(inp); 1876 KASSERT(up != NULL, ("%s: up == NULL", __func__)); 1877 inp->inp_ppcb = NULL; 1878 in_pcbdetach(inp); 1879 in_pcbfree(inp); 1880 INP_INFO_WUNLOCK(pcbinfo); 1881 udp_discardcb(up); 1882 } 1883 1884 static int 1885 udp_disconnect(struct socket *so) 1886 { 1887 struct inpcb *inp; 1888 struct inpcbinfo *pcbinfo; 1889 1890 pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol); 1891 inp = sotoinpcb(so); 1892 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL")); 1893 INP_WLOCK(inp); 1894 if (inp->inp_faddr.s_addr == INADDR_ANY) { 1895 INP_WUNLOCK(inp); 1896 return (ENOTCONN); 1897 } 1898 INP_HASH_WLOCK(pcbinfo); 1899 in_pcbdisconnect(inp); 1900 inp->inp_laddr.s_addr = INADDR_ANY; 1901 INP_HASH_WUNLOCK(pcbinfo); 1902 SOCK_LOCK(so); 1903 so->so_state &= ~SS_ISCONNECTED; /* XXX */ 1904 SOCK_UNLOCK(so); 1905 INP_WUNLOCK(inp); 1906 return (0); 1907 } 1908 1909 static int 1910 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 1911 struct mbuf *control, struct thread *td) 1912 { 1913 struct inpcb *inp; 1914 1915 inp = sotoinpcb(so); 1916 KASSERT(inp != NULL, ("udp_send: inp == NULL")); 1917 return (udp_output(inp, m, addr, control, td)); 1918 } 1919 #endif /* INET */ 1920 1921 int 1922 udp_shutdown(struct socket *so) 1923 { 1924 struct inpcb *inp; 1925 1926 inp = sotoinpcb(so); 1927 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL")); 1928 INP_WLOCK(inp); 1929 socantsendmore(so); 1930 INP_WUNLOCK(inp); 1931 return (0); 1932 } 1933 1934 #ifdef INET 1935 struct pr_usrreqs udp_usrreqs = { 1936 .pru_abort = udp_abort, 1937 .pru_attach = udp_attach, 1938 .pru_bind = udp_bind, 1939 .pru_connect = udp_connect, 1940 .pru_control = in_control, 1941 .pru_detach = udp_detach, 1942 .pru_disconnect = udp_disconnect, 1943 .pru_peeraddr = in_getpeeraddr, 1944 .pru_send = udp_send, 1945 .pru_soreceive = soreceive_dgram, 1946 .pru_sosend = sosend_dgram, 1947 .pru_shutdown = udp_shutdown, 1948 .pru_sockaddr = in_getsockaddr, 1949 .pru_sosetlabel = in_pcbsosetlabel, 1950 .pru_close = udp_close, 1951 }; 1952 #endif /* INET */ 1953