1 /*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 3 * The Regents of the University of California. 4 * Copyright (c) 2008 Robert N. M. Watson 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 4. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_ipfw.h" 38 #include "opt_inet6.h" 39 #include "opt_ipsec.h" 40 #include "opt_mac.h" 41 42 #include <sys/param.h> 43 #include <sys/domain.h> 44 #include <sys/eventhandler.h> 45 #include <sys/jail.h> 46 #include <sys/kernel.h> 47 #include <sys/lock.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/priv.h> 51 #include <sys/proc.h> 52 #include <sys/protosw.h> 53 #include <sys/signalvar.h> 54 #include <sys/socket.h> 55 #include <sys/socketvar.h> 56 #include <sys/sx.h> 57 #include <sys/sysctl.h> 58 #include <sys/syslog.h> 59 #include <sys/systm.h> 60 #include <sys/vimage.h> 61 62 #include <vm/uma.h> 63 64 #include <net/if.h> 65 #include <net/route.h> 66 67 #include <netinet/in.h> 68 #include <netinet/in_pcb.h> 69 #include <netinet/in_systm.h> 70 #include <netinet/in_var.h> 71 #include <netinet/ip.h> 72 #ifdef INET6 73 #include <netinet/ip6.h> 74 #endif 75 #include <netinet/ip_icmp.h> 76 #include <netinet/icmp_var.h> 77 #include <netinet/ip_var.h> 78 #include <netinet/ip_options.h> 79 #ifdef INET6 80 #include <netinet6/ip6_var.h> 81 #endif 82 #include <netinet/udp.h> 83 #include <netinet/udp_var.h> 84 #include <netinet/vinet.h> 85 86 #ifdef IPSEC 87 #include <netipsec/ipsec.h> 88 #endif 89 90 #include <machine/in_cksum.h> 91 92 #include <security/mac/mac_framework.h> 93 94 /* 95 * UDP protocol implementation. 96 * Per RFC 768, August, 1980. 97 */ 98 99 #ifdef VIMAGE_GLOBALS 100 int udp_blackhole; 101 #endif 102 103 /* 104 * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums 105 * removes the only data integrity mechanism for packets and malformed 106 * packets that would otherwise be discarded due to bad checksums, and may 107 * cause problems (especially for NFS data blocks). 108 */ 109 static int udp_cksum = 1; 110 SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW, &udp_cksum, 111 0, "compute udp checksum"); 112 113 int udp_log_in_vain = 0; 114 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW, 115 &udp_log_in_vain, 0, "Log all incoming UDP packets"); 116 117 SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_udp, OID_AUTO, blackhole, 118 CTLFLAG_RW, udp_blackhole, 0, 119 "Do not send port unreachables for refused connects"); 120 121 u_long udp_sendspace = 9216; /* really max datagram size */ 122 /* 40 1K datagrams */ 123 SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW, 124 &udp_sendspace, 0, "Maximum outgoing UDP datagram size"); 125 126 u_long udp_recvspace = 40 * (1024 + 127 #ifdef INET6 128 sizeof(struct sockaddr_in6) 129 #else 130 sizeof(struct sockaddr_in) 131 #endif 132 ); 133 134 SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW, 135 &udp_recvspace, 0, "Maximum space for incoming UDP datagrams"); 136 137 #ifdef VIMAGE_GLOBALS 138 struct inpcbhead udb; /* from udp_var.h */ 139 struct inpcbinfo udbinfo; 140 struct udpstat udpstat; /* from udp_var.h */ 141 #endif 142 143 #ifndef UDBHASHSIZE 144 #define UDBHASHSIZE 128 145 #endif 146 147 SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_udp, UDPCTL_STATS, stats, 148 CTLFLAG_RW, udpstat, udpstat, 149 "UDP statistics (struct udpstat, netinet/udp_var.h)"); 150 151 static void udp_detach(struct socket *so); 152 static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *, 153 struct mbuf *, struct thread *); 154 155 static void 156 udp_zone_change(void *tag) 157 { 158 159 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets); 160 } 161 162 static int 163 udp_inpcb_init(void *mem, int size, int flags) 164 { 165 struct inpcb *inp; 166 167 inp = mem; 168 INP_LOCK_INIT(inp, "inp", "udpinp"); 169 return (0); 170 } 171 172 void 173 udp_init(void) 174 { 175 INIT_VNET_INET(curvnet); 176 177 V_udp_blackhole = 0; 178 179 INP_INFO_LOCK_INIT(&V_udbinfo, "udp"); 180 LIST_INIT(&V_udb); 181 V_udbinfo.ipi_listhead = &V_udb; 182 V_udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB, 183 &V_udbinfo.ipi_hashmask); 184 V_udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB, 185 &V_udbinfo.ipi_porthashmask); 186 V_udbinfo.ipi_zone = uma_zcreate("udpcb", sizeof(struct inpcb), NULL, 187 NULL, udp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 188 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets); 189 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL, 190 EVENTHANDLER_PRI_ANY); 191 } 192 193 /* 194 * Subroutine of udp_input(), which appends the provided mbuf chain to the 195 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that 196 * contains the source address. If the socket ends up being an IPv6 socket, 197 * udp_append() will convert to a sockaddr_in6 before passing the address 198 * into the socket code. 199 */ 200 static void 201 udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off, 202 struct sockaddr_in *udp_in) 203 { 204 struct sockaddr *append_sa; 205 struct socket *so; 206 struct mbuf *opts = 0; 207 #ifdef INET6 208 struct sockaddr_in6 udp_in6; 209 #endif 210 211 INP_RLOCK_ASSERT(inp); 212 213 #ifdef IPSEC 214 /* Check AH/ESP integrity. */ 215 if (ipsec4_in_reject(n, inp)) { 216 INIT_VNET_IPSEC(curvnet); 217 m_freem(n); 218 V_ipsec4stat.in_polvio++; 219 return; 220 } 221 #endif /* IPSEC */ 222 #ifdef MAC 223 if (mac_inpcb_check_deliver(inp, n) != 0) { 224 m_freem(n); 225 return; 226 } 227 #endif 228 if (inp->inp_flags & INP_CONTROLOPTS || 229 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) { 230 #ifdef INET6 231 if (inp->inp_vflag & INP_IPV6) 232 (void)ip6_savecontrol_v4(inp, n, &opts, NULL); 233 else 234 #endif 235 ip_savecontrol(inp, &opts, ip, n); 236 } 237 #ifdef INET6 238 if (inp->inp_vflag & INP_IPV6) { 239 bzero(&udp_in6, sizeof(udp_in6)); 240 udp_in6.sin6_len = sizeof(udp_in6); 241 udp_in6.sin6_family = AF_INET6; 242 in6_sin_2_v4mapsin6(udp_in, &udp_in6); 243 append_sa = (struct sockaddr *)&udp_in6; 244 } else 245 #endif 246 append_sa = (struct sockaddr *)udp_in; 247 m_adj(n, off); 248 249 so = inp->inp_socket; 250 SOCKBUF_LOCK(&so->so_rcv); 251 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) { 252 INIT_VNET_INET(so->so_vnet); 253 SOCKBUF_UNLOCK(&so->so_rcv); 254 m_freem(n); 255 if (opts) 256 m_freem(opts); 257 V_udpstat.udps_fullsock++; 258 } else 259 sorwakeup_locked(so); 260 } 261 262 void 263 udp_input(struct mbuf *m, int off) 264 { 265 INIT_VNET_INET(curvnet); 266 int iphlen = off; 267 struct ip *ip; 268 struct udphdr *uh; 269 struct ifnet *ifp; 270 struct inpcb *inp; 271 int len; 272 struct ip save_ip; 273 struct sockaddr_in udp_in; 274 #ifdef IPFIREWALL_FORWARD 275 struct m_tag *fwd_tag; 276 #endif 277 278 ifp = m->m_pkthdr.rcvif; 279 V_udpstat.udps_ipackets++; 280 281 /* 282 * Strip IP options, if any; should skip this, make available to 283 * user, and use on returned packets, but we don't yet have a way to 284 * check the checksum with options still present. 285 */ 286 if (iphlen > sizeof (struct ip)) { 287 ip_stripoptions(m, (struct mbuf *)0); 288 iphlen = sizeof(struct ip); 289 } 290 291 /* 292 * Get IP and UDP header together in first mbuf. 293 */ 294 ip = mtod(m, struct ip *); 295 if (m->m_len < iphlen + sizeof(struct udphdr)) { 296 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) { 297 V_udpstat.udps_hdrops++; 298 return; 299 } 300 ip = mtod(m, struct ip *); 301 } 302 uh = (struct udphdr *)((caddr_t)ip + iphlen); 303 304 /* 305 * Destination port of 0 is illegal, based on RFC768. 306 */ 307 if (uh->uh_dport == 0) 308 goto badunlocked; 309 310 /* 311 * Construct sockaddr format source address. Stuff source address 312 * and datagram in user buffer. 313 */ 314 bzero(&udp_in, sizeof(udp_in)); 315 udp_in.sin_len = sizeof(udp_in); 316 udp_in.sin_family = AF_INET; 317 udp_in.sin_port = uh->uh_sport; 318 udp_in.sin_addr = ip->ip_src; 319 320 /* 321 * Make mbuf data length reflect UDP length. If not enough data to 322 * reflect UDP length, drop. 323 */ 324 len = ntohs((u_short)uh->uh_ulen); 325 if (ip->ip_len != len) { 326 if (len > ip->ip_len || len < sizeof(struct udphdr)) { 327 V_udpstat.udps_badlen++; 328 goto badunlocked; 329 } 330 m_adj(m, len - ip->ip_len); 331 /* ip->ip_len = len; */ 332 } 333 334 /* 335 * Save a copy of the IP header in case we want restore it for 336 * sending an ICMP error message in response. 337 */ 338 if (!V_udp_blackhole) 339 save_ip = *ip; 340 else 341 memset(&save_ip, 0, sizeof(save_ip)); 342 343 /* 344 * Checksum extended UDP header and data. 345 */ 346 if (uh->uh_sum) { 347 u_short uh_sum; 348 349 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 350 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 351 uh_sum = m->m_pkthdr.csum_data; 352 else 353 uh_sum = in_pseudo(ip->ip_src.s_addr, 354 ip->ip_dst.s_addr, htonl((u_short)len + 355 m->m_pkthdr.csum_data + IPPROTO_UDP)); 356 uh_sum ^= 0xffff; 357 } else { 358 char b[9]; 359 360 bcopy(((struct ipovly *)ip)->ih_x1, b, 9); 361 bzero(((struct ipovly *)ip)->ih_x1, 9); 362 ((struct ipovly *)ip)->ih_len = uh->uh_ulen; 363 uh_sum = in_cksum(m, len + sizeof (struct ip)); 364 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9); 365 } 366 if (uh_sum) { 367 V_udpstat.udps_badsum++; 368 m_freem(m); 369 return; 370 } 371 } else 372 V_udpstat.udps_nosum++; 373 374 #ifdef IPFIREWALL_FORWARD 375 /* 376 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 377 */ 378 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 379 if (fwd_tag != NULL) { 380 struct sockaddr_in *next_hop; 381 382 /* 383 * Do the hack. 384 */ 385 next_hop = (struct sockaddr_in *)(fwd_tag + 1); 386 ip->ip_dst = next_hop->sin_addr; 387 uh->uh_dport = ntohs(next_hop->sin_port); 388 389 /* 390 * Remove the tag from the packet. We don't need it anymore. 391 */ 392 m_tag_delete(m, fwd_tag); 393 } 394 #endif 395 396 INP_INFO_RLOCK(&V_udbinfo); 397 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 398 in_broadcast(ip->ip_dst, ifp)) { 399 struct inpcb *last; 400 struct ip_moptions *imo; 401 402 last = NULL; 403 LIST_FOREACH(inp, &V_udb, inp_list) { 404 if (inp->inp_lport != uh->uh_dport) 405 continue; 406 #ifdef INET6 407 if ((inp->inp_vflag & INP_IPV4) == 0) 408 continue; 409 #endif 410 if (inp->inp_laddr.s_addr != INADDR_ANY && 411 inp->inp_laddr.s_addr != ip->ip_dst.s_addr) 412 continue; 413 if (inp->inp_faddr.s_addr != INADDR_ANY && 414 inp->inp_faddr.s_addr != ip->ip_src.s_addr) 415 continue; 416 /* 417 * XXX: Do not check source port of incoming datagram 418 * unless inp_connect() has been called to bind the 419 * fport part of the 4-tuple; the source could be 420 * trying to talk to us with an ephemeral port. 421 */ 422 if (inp->inp_fport != 0 && 423 inp->inp_fport != uh->uh_sport) 424 continue; 425 426 INP_RLOCK(inp); 427 428 /* 429 * Handle socket delivery policy for any-source 430 * and source-specific multicast. [RFC3678] 431 */ 432 imo = inp->inp_moptions; 433 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) && 434 imo != NULL) { 435 struct sockaddr_in sin; 436 struct in_msource *ims; 437 int blocked, mode; 438 size_t idx; 439 440 bzero(&sin, sizeof(struct sockaddr_in)); 441 sin.sin_len = sizeof(struct sockaddr_in); 442 sin.sin_family = AF_INET; 443 sin.sin_addr = ip->ip_dst; 444 445 blocked = 0; 446 idx = imo_match_group(imo, ifp, 447 (struct sockaddr *)&sin); 448 if (idx == -1) { 449 /* 450 * No group membership for this socket. 451 * Do not bump udps_noportbcast, as 452 * this will happen further down. 453 */ 454 blocked++; 455 } else { 456 /* 457 * Check for a multicast source filter 458 * entry on this socket for this group. 459 * MCAST_EXCLUDE is the default 460 * behaviour. It means default accept; 461 * entries, if present, denote sources 462 * to be excluded from delivery. 463 */ 464 ims = imo_match_source(imo, idx, 465 (struct sockaddr *)&udp_in); 466 mode = imo->imo_mfilters[idx].imf_fmode; 467 if ((ims != NULL && 468 mode == MCAST_EXCLUDE) || 469 (ims == NULL && 470 mode == MCAST_INCLUDE)) { 471 #ifdef DIAGNOSTIC 472 if (bootverbose) { 473 printf("%s: blocked by" 474 " source filter\n", 475 __func__); 476 } 477 #endif 478 V_udpstat.udps_filtermcast++; 479 blocked++; 480 } 481 } 482 if (blocked != 0) { 483 INP_RUNLOCK(inp); 484 continue; 485 } 486 } 487 if (last != NULL) { 488 struct mbuf *n; 489 490 n = m_copy(m, 0, M_COPYALL); 491 if (last->inp_ppcb == NULL) { 492 if (n != NULL) 493 udp_append(last, 494 ip, n, 495 iphlen + 496 sizeof(struct udphdr), 497 &udp_in); 498 INP_RUNLOCK(last); 499 } else { 500 /* 501 * Engage the tunneling protocol we 502 * will have to leave the info_lock 503 * up, since we are hunting through 504 * multiple UDP's. 505 * 506 */ 507 udp_tun_func_t tunnel_func; 508 509 tunnel_func = (udp_tun_func_t)last->inp_ppcb; 510 tunnel_func(n, iphlen, last); 511 INP_RUNLOCK(last); 512 } 513 } 514 last = inp; 515 /* 516 * Don't look for additional matches if this one does 517 * not have either the SO_REUSEPORT or SO_REUSEADDR 518 * socket options set. This heuristic avoids 519 * searching through all pcbs in the common case of a 520 * non-shared port. It assumes that an application 521 * will never clear these options after setting them. 522 */ 523 if ((last->inp_socket->so_options & 524 (SO_REUSEPORT|SO_REUSEADDR)) == 0) 525 break; 526 } 527 528 if (last == NULL) { 529 /* 530 * No matching pcb found; discard datagram. (No need 531 * to send an ICMP Port Unreachable for a broadcast 532 * or multicast datgram.) 533 */ 534 V_udpstat.udps_noportbcast++; 535 goto badheadlocked; 536 } 537 if (last->inp_ppcb == NULL) { 538 udp_append(last, ip, m, iphlen + sizeof(struct udphdr), 539 &udp_in); 540 INP_RUNLOCK(last); 541 INP_INFO_RUNLOCK(&V_udbinfo); 542 } else { 543 /* 544 * Engage the tunneling protocol. 545 */ 546 udp_tun_func_t tunnel_func; 547 548 tunnel_func = (udp_tun_func_t)last->inp_ppcb; 549 tunnel_func(m, iphlen, last); 550 INP_RUNLOCK(last); 551 INP_INFO_RUNLOCK(&V_udbinfo); 552 } 553 return; 554 } 555 556 /* 557 * Locate pcb for datagram. 558 */ 559 inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport, 560 ip->ip_dst, uh->uh_dport, 1, ifp); 561 if (inp == NULL) { 562 if (udp_log_in_vain) { 563 char buf[4*sizeof "123"]; 564 565 strcpy(buf, inet_ntoa(ip->ip_dst)); 566 log(LOG_INFO, 567 "Connection attempt to UDP %s:%d from %s:%d\n", 568 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src), 569 ntohs(uh->uh_sport)); 570 } 571 V_udpstat.udps_noport++; 572 if (m->m_flags & (M_BCAST | M_MCAST)) { 573 V_udpstat.udps_noportbcast++; 574 goto badheadlocked; 575 } 576 if (V_udp_blackhole) 577 goto badheadlocked; 578 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) 579 goto badheadlocked; 580 *ip = save_ip; 581 ip->ip_len += iphlen; 582 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0); 583 INP_INFO_RUNLOCK(&V_udbinfo); 584 return; 585 } 586 587 /* 588 * Check the minimum TTL for socket. 589 */ 590 INP_RLOCK(inp); 591 INP_INFO_RUNLOCK(&V_udbinfo); 592 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) { 593 INP_RUNLOCK(inp); 594 goto badunlocked; 595 } 596 if (inp->inp_ppcb != NULL) { 597 /* 598 * Engage the tunneling protocol. 599 */ 600 udp_tun_func_t tunnel_func; 601 602 tunnel_func = (udp_tun_func_t)inp->inp_ppcb; 603 tunnel_func(m, iphlen, inp); 604 INP_RUNLOCK(inp); 605 return; 606 } 607 udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in); 608 INP_RUNLOCK(inp); 609 return; 610 611 badheadlocked: 612 if (inp) 613 INP_RUNLOCK(inp); 614 INP_INFO_RUNLOCK(&V_udbinfo); 615 badunlocked: 616 m_freem(m); 617 } 618 619 /* 620 * Notify a udp user of an asynchronous error; just wake up so that they can 621 * collect error status. 622 */ 623 struct inpcb * 624 udp_notify(struct inpcb *inp, int errno) 625 { 626 627 /* 628 * While udp_ctlinput() always calls udp_notify() with a read lock 629 * when invoking it directly, in_pcbnotifyall() currently uses write 630 * locks due to sharing code with TCP. For now, accept either a read 631 * or a write lock, but a read lock is sufficient. 632 */ 633 INP_LOCK_ASSERT(inp); 634 635 inp->inp_socket->so_error = errno; 636 sorwakeup(inp->inp_socket); 637 sowwakeup(inp->inp_socket); 638 return (inp); 639 } 640 641 void 642 udp_ctlinput(int cmd, struct sockaddr *sa, void *vip) 643 { 644 INIT_VNET_INET(curvnet); 645 struct ip *ip = vip; 646 struct udphdr *uh; 647 struct in_addr faddr; 648 struct inpcb *inp; 649 650 faddr = ((struct sockaddr_in *)sa)->sin_addr; 651 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) 652 return; 653 654 /* 655 * Redirects don't need to be handled up here. 656 */ 657 if (PRC_IS_REDIRECT(cmd)) 658 return; 659 660 /* 661 * Hostdead is ugly because it goes linearly through all PCBs. 662 * 663 * XXX: We never get this from ICMP, otherwise it makes an excellent 664 * DoS attack on machines with many connections. 665 */ 666 if (cmd == PRC_HOSTDEAD) 667 ip = NULL; 668 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) 669 return; 670 if (ip != NULL) { 671 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 672 INP_INFO_RLOCK(&V_udbinfo); 673 inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport, 674 ip->ip_src, uh->uh_sport, 0, NULL); 675 if (inp != NULL) { 676 INP_RLOCK(inp); 677 if (inp->inp_socket != NULL) { 678 udp_notify(inp, inetctlerrmap[cmd]); 679 } 680 INP_RUNLOCK(inp); 681 } 682 INP_INFO_RUNLOCK(&V_udbinfo); 683 } else 684 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd], 685 udp_notify); 686 } 687 688 static int 689 udp_pcblist(SYSCTL_HANDLER_ARGS) 690 { 691 INIT_VNET_INET(curvnet); 692 int error, i, n; 693 struct inpcb *inp, **inp_list; 694 inp_gen_t gencnt; 695 struct xinpgen xig; 696 697 /* 698 * The process of preparing the PCB list is too time-consuming and 699 * resource-intensive to repeat twice on every request. 700 */ 701 if (req->oldptr == 0) { 702 n = V_udbinfo.ipi_count; 703 req->oldidx = 2 * (sizeof xig) 704 + (n + n/8) * sizeof(struct xinpcb); 705 return (0); 706 } 707 708 if (req->newptr != 0) 709 return (EPERM); 710 711 /* 712 * OK, now we're committed to doing something. 713 */ 714 INP_INFO_RLOCK(&V_udbinfo); 715 gencnt = V_udbinfo.ipi_gencnt; 716 n = V_udbinfo.ipi_count; 717 INP_INFO_RUNLOCK(&V_udbinfo); 718 719 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig) 720 + n * sizeof(struct xinpcb)); 721 if (error != 0) 722 return (error); 723 724 xig.xig_len = sizeof xig; 725 xig.xig_count = n; 726 xig.xig_gen = gencnt; 727 xig.xig_sogen = so_gencnt; 728 error = SYSCTL_OUT(req, &xig, sizeof xig); 729 if (error) 730 return (error); 731 732 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK); 733 if (inp_list == 0) 734 return (ENOMEM); 735 736 INP_INFO_RLOCK(&V_udbinfo); 737 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n; 738 inp = LIST_NEXT(inp, inp_list)) { 739 INP_RLOCK(inp); 740 if (inp->inp_gencnt <= gencnt && 741 cr_canseeinpcb(req->td->td_ucred, inp) == 0) 742 inp_list[i++] = inp; 743 INP_RUNLOCK(inp); 744 } 745 INP_INFO_RUNLOCK(&V_udbinfo); 746 n = i; 747 748 error = 0; 749 for (i = 0; i < n; i++) { 750 inp = inp_list[i]; 751 INP_RLOCK(inp); 752 if (inp->inp_gencnt <= gencnt) { 753 struct xinpcb xi; 754 bzero(&xi, sizeof(xi)); 755 xi.xi_len = sizeof xi; 756 /* XXX should avoid extra copy */ 757 bcopy(inp, &xi.xi_inp, sizeof *inp); 758 if (inp->inp_socket) 759 sotoxsocket(inp->inp_socket, &xi.xi_socket); 760 xi.xi_inp.inp_gencnt = inp->inp_gencnt; 761 INP_RUNLOCK(inp); 762 error = SYSCTL_OUT(req, &xi, sizeof xi); 763 } else 764 INP_RUNLOCK(inp); 765 } 766 if (!error) { 767 /* 768 * Give the user an updated idea of our state. If the 769 * generation differs from what we told her before, she knows 770 * that something happened while we were processing this 771 * request, and it might be necessary to retry. 772 */ 773 INP_INFO_RLOCK(&V_udbinfo); 774 xig.xig_gen = V_udbinfo.ipi_gencnt; 775 xig.xig_sogen = so_gencnt; 776 xig.xig_count = V_udbinfo.ipi_count; 777 INP_INFO_RUNLOCK(&V_udbinfo); 778 error = SYSCTL_OUT(req, &xig, sizeof xig); 779 } 780 free(inp_list, M_TEMP); 781 return (error); 782 } 783 784 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0, 785 udp_pcblist, "S,xinpcb", "List of active UDP sockets"); 786 787 static int 788 udp_getcred(SYSCTL_HANDLER_ARGS) 789 { 790 INIT_VNET_INET(curvnet); 791 struct xucred xuc; 792 struct sockaddr_in addrs[2]; 793 struct inpcb *inp; 794 int error; 795 796 error = priv_check(req->td, PRIV_NETINET_GETCRED); 797 if (error) 798 return (error); 799 error = SYSCTL_IN(req, addrs, sizeof(addrs)); 800 if (error) 801 return (error); 802 INP_INFO_RLOCK(&V_udbinfo); 803 inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port, 804 addrs[0].sin_addr, addrs[0].sin_port, 1, NULL); 805 if (inp != NULL) { 806 INP_RLOCK(inp); 807 INP_INFO_RUNLOCK(&V_udbinfo); 808 if (inp->inp_socket == NULL) 809 error = ENOENT; 810 if (error == 0) 811 error = cr_canseeinpcb(req->td->td_ucred, inp); 812 if (error == 0) 813 cru2x(inp->inp_cred, &xuc); 814 INP_RUNLOCK(inp); 815 } else { 816 INP_INFO_RUNLOCK(&V_udbinfo); 817 error = ENOENT; 818 } 819 if (error == 0) 820 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred)); 821 return (error); 822 } 823 824 SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred, 825 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0, 826 udp_getcred, "S,xucred", "Get the xucred of a UDP connection"); 827 828 static int 829 udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, 830 struct mbuf *control, struct thread *td) 831 { 832 INIT_VNET_INET(inp->inp_vnet); 833 struct udpiphdr *ui; 834 int len = m->m_pkthdr.len; 835 struct in_addr faddr, laddr; 836 struct cmsghdr *cm; 837 struct sockaddr_in *sin, src; 838 int error = 0; 839 int ipflags; 840 u_short fport, lport; 841 int unlock_udbinfo; 842 843 /* 844 * udp_output() may need to temporarily bind or connect the current 845 * inpcb. As such, we don't know up front whether we will need the 846 * pcbinfo lock or not. Do any work to decide what is needed up 847 * front before acquiring any locks. 848 */ 849 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) { 850 if (control) 851 m_freem(control); 852 m_freem(m); 853 return (EMSGSIZE); 854 } 855 856 src.sin_family = 0; 857 if (control != NULL) { 858 /* 859 * XXX: Currently, we assume all the optional information is 860 * stored in a single mbuf. 861 */ 862 if (control->m_next) { 863 m_freem(control); 864 m_freem(m); 865 return (EINVAL); 866 } 867 for (; control->m_len > 0; 868 control->m_data += CMSG_ALIGN(cm->cmsg_len), 869 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) { 870 cm = mtod(control, struct cmsghdr *); 871 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0 872 || cm->cmsg_len > control->m_len) { 873 error = EINVAL; 874 break; 875 } 876 if (cm->cmsg_level != IPPROTO_IP) 877 continue; 878 879 switch (cm->cmsg_type) { 880 case IP_SENDSRCADDR: 881 if (cm->cmsg_len != 882 CMSG_LEN(sizeof(struct in_addr))) { 883 error = EINVAL; 884 break; 885 } 886 bzero(&src, sizeof(src)); 887 src.sin_family = AF_INET; 888 src.sin_len = sizeof(src); 889 src.sin_port = inp->inp_lport; 890 src.sin_addr = 891 *(struct in_addr *)CMSG_DATA(cm); 892 break; 893 894 default: 895 error = ENOPROTOOPT; 896 break; 897 } 898 if (error) 899 break; 900 } 901 m_freem(control); 902 } 903 if (error) { 904 m_freem(m); 905 return (error); 906 } 907 908 /* 909 * Depending on whether or not the application has bound or connected 910 * the socket, we may have to do varying levels of work. The optimal 911 * case is for a connected UDP socket, as a global lock isn't 912 * required at all. 913 * 914 * In order to decide which we need, we require stability of the 915 * inpcb binding, which we ensure by acquiring a read lock on the 916 * inpcb. This doesn't strictly follow the lock order, so we play 917 * the trylock and retry game; note that we may end up with more 918 * conservative locks than required the second time around, so later 919 * assertions have to accept that. Further analysis of the number of 920 * misses under contention is required. 921 */ 922 sin = (struct sockaddr_in *)addr; 923 INP_RLOCK(inp); 924 if (sin != NULL && 925 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) { 926 INP_RUNLOCK(inp); 927 INP_INFO_WLOCK(&V_udbinfo); 928 INP_WLOCK(inp); 929 unlock_udbinfo = 2; 930 } else if ((sin != NULL && ( 931 (sin->sin_addr.s_addr == INADDR_ANY) || 932 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 933 (inp->inp_laddr.s_addr == INADDR_ANY) || 934 (inp->inp_lport == 0))) || 935 (src.sin_family == AF_INET)) { 936 if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) { 937 INP_RUNLOCK(inp); 938 INP_INFO_RLOCK(&V_udbinfo); 939 INP_RLOCK(inp); 940 } 941 unlock_udbinfo = 1; 942 } else 943 unlock_udbinfo = 0; 944 945 /* 946 * If the IP_SENDSRCADDR control message was specified, override the 947 * source address for this datagram. Its use is invalidated if the 948 * address thus specified is incomplete or clobbers other inpcbs. 949 */ 950 laddr = inp->inp_laddr; 951 lport = inp->inp_lport; 952 if (src.sin_family == AF_INET) { 953 INP_INFO_LOCK_ASSERT(&V_udbinfo); 954 if ((lport == 0) || 955 (laddr.s_addr == INADDR_ANY && 956 src.sin_addr.s_addr == INADDR_ANY)) { 957 error = EINVAL; 958 goto release; 959 } 960 error = in_pcbbind_setup(inp, (struct sockaddr *)&src, 961 &laddr.s_addr, &lport, td->td_ucred); 962 if (error) 963 goto release; 964 } 965 966 /* 967 * If a UDP socket has been connected, then a local address/port will 968 * have been selected and bound. 969 * 970 * If a UDP socket has not been connected to, then an explicit 971 * destination address must be used, in which case a local 972 * address/port may not have been selected and bound. 973 */ 974 if (sin != NULL) { 975 INP_LOCK_ASSERT(inp); 976 if (inp->inp_faddr.s_addr != INADDR_ANY) { 977 error = EISCONN; 978 goto release; 979 } 980 981 /* 982 * Jail may rewrite the destination address, so let it do 983 * that before we use it. 984 */ 985 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr); 986 if (error) 987 goto release; 988 989 /* 990 * If a local address or port hasn't yet been selected, or if 991 * the destination address needs to be rewritten due to using 992 * a special INADDR_ constant, invoke in_pcbconnect_setup() 993 * to do the heavy lifting. Once a port is selected, we 994 * commit the binding back to the socket; we also commit the 995 * binding of the address if in jail. 996 * 997 * If we already have a valid binding and we're not 998 * requesting a destination address rewrite, use a fast path. 999 */ 1000 if (inp->inp_laddr.s_addr == INADDR_ANY || 1001 inp->inp_lport == 0 || 1002 sin->sin_addr.s_addr == INADDR_ANY || 1003 sin->sin_addr.s_addr == INADDR_BROADCAST) { 1004 INP_INFO_LOCK_ASSERT(&V_udbinfo); 1005 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr, 1006 &lport, &faddr.s_addr, &fport, NULL, 1007 td->td_ucred); 1008 if (error) 1009 goto release; 1010 1011 /* 1012 * XXXRW: Why not commit the port if the address is 1013 * !INADDR_ANY? 1014 */ 1015 /* Commit the local port if newly assigned. */ 1016 if (inp->inp_laddr.s_addr == INADDR_ANY && 1017 inp->inp_lport == 0) { 1018 INP_INFO_WLOCK_ASSERT(&V_udbinfo); 1019 INP_WLOCK_ASSERT(inp); 1020 /* 1021 * Remember addr if jailed, to prevent 1022 * rebinding. 1023 */ 1024 if (jailed(td->td_ucred)) 1025 inp->inp_laddr = laddr; 1026 inp->inp_lport = lport; 1027 if (in_pcbinshash(inp) != 0) { 1028 inp->inp_lport = 0; 1029 error = EAGAIN; 1030 goto release; 1031 } 1032 inp->inp_flags |= INP_ANONPORT; 1033 } 1034 } else { 1035 faddr = sin->sin_addr; 1036 fport = sin->sin_port; 1037 } 1038 } else { 1039 INP_LOCK_ASSERT(inp); 1040 faddr = inp->inp_faddr; 1041 fport = inp->inp_fport; 1042 if (faddr.s_addr == INADDR_ANY) { 1043 error = ENOTCONN; 1044 goto release; 1045 } 1046 } 1047 1048 /* 1049 * Calculate data length and get a mbuf for UDP, IP, and possible 1050 * link-layer headers. Immediate slide the data pointer back forward 1051 * since we won't use that space at this layer. 1052 */ 1053 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT); 1054 if (m == NULL) { 1055 error = ENOBUFS; 1056 goto release; 1057 } 1058 m->m_data += max_linkhdr; 1059 m->m_len -= max_linkhdr; 1060 m->m_pkthdr.len -= max_linkhdr; 1061 1062 /* 1063 * Fill in mbuf with extended UDP header and addresses and length put 1064 * into network format. 1065 */ 1066 ui = mtod(m, struct udpiphdr *); 1067 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */ 1068 ui->ui_pr = IPPROTO_UDP; 1069 ui->ui_src = laddr; 1070 ui->ui_dst = faddr; 1071 ui->ui_sport = lport; 1072 ui->ui_dport = fport; 1073 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr)); 1074 1075 /* 1076 * Set the Don't Fragment bit in the IP header. 1077 */ 1078 if (inp->inp_flags & INP_DONTFRAG) { 1079 struct ip *ip; 1080 1081 ip = (struct ip *)&ui->ui_i; 1082 ip->ip_off |= IP_DF; 1083 } 1084 1085 ipflags = 0; 1086 if (inp->inp_socket->so_options & SO_DONTROUTE) 1087 ipflags |= IP_ROUTETOIF; 1088 if (inp->inp_socket->so_options & SO_BROADCAST) 1089 ipflags |= IP_ALLOWBROADCAST; 1090 if (inp->inp_flags & INP_ONESBCAST) 1091 ipflags |= IP_SENDONES; 1092 1093 #ifdef MAC 1094 mac_inpcb_create_mbuf(inp, m); 1095 #endif 1096 1097 /* 1098 * Set up checksum and output datagram. 1099 */ 1100 if (udp_cksum) { 1101 if (inp->inp_flags & INP_ONESBCAST) 1102 faddr.s_addr = INADDR_BROADCAST; 1103 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr, 1104 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP)); 1105 m->m_pkthdr.csum_flags = CSUM_UDP; 1106 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 1107 } else 1108 ui->ui_sum = 0; 1109 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len; 1110 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */ 1111 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */ 1112 V_udpstat.udps_opackets++; 1113 1114 if (unlock_udbinfo == 2) 1115 INP_INFO_WUNLOCK(&V_udbinfo); 1116 else if (unlock_udbinfo == 1) 1117 INP_INFO_RUNLOCK(&V_udbinfo); 1118 error = ip_output(m, inp->inp_options, NULL, ipflags, 1119 inp->inp_moptions, inp); 1120 if (unlock_udbinfo == 2) 1121 INP_WUNLOCK(inp); 1122 else 1123 INP_RUNLOCK(inp); 1124 return (error); 1125 1126 release: 1127 if (unlock_udbinfo == 2) { 1128 INP_WUNLOCK(inp); 1129 INP_INFO_WUNLOCK(&V_udbinfo); 1130 } else if (unlock_udbinfo == 1) { 1131 INP_RUNLOCK(inp); 1132 INP_INFO_RUNLOCK(&V_udbinfo); 1133 } else 1134 INP_RUNLOCK(inp); 1135 m_freem(m); 1136 return (error); 1137 } 1138 1139 static void 1140 udp_abort(struct socket *so) 1141 { 1142 INIT_VNET_INET(so->so_vnet); 1143 struct inpcb *inp; 1144 1145 inp = sotoinpcb(so); 1146 KASSERT(inp != NULL, ("udp_abort: inp == NULL")); 1147 INP_INFO_WLOCK(&V_udbinfo); 1148 INP_WLOCK(inp); 1149 if (inp->inp_faddr.s_addr != INADDR_ANY) { 1150 in_pcbdisconnect(inp); 1151 inp->inp_laddr.s_addr = INADDR_ANY; 1152 soisdisconnected(so); 1153 } 1154 INP_WUNLOCK(inp); 1155 INP_INFO_WUNLOCK(&V_udbinfo); 1156 } 1157 1158 static int 1159 udp_attach(struct socket *so, int proto, struct thread *td) 1160 { 1161 INIT_VNET_INET(so->so_vnet); 1162 struct inpcb *inp; 1163 int error; 1164 1165 inp = sotoinpcb(so); 1166 KASSERT(inp == NULL, ("udp_attach: inp != NULL")); 1167 error = soreserve(so, udp_sendspace, udp_recvspace); 1168 if (error) 1169 return (error); 1170 INP_INFO_WLOCK(&V_udbinfo); 1171 error = in_pcballoc(so, &V_udbinfo); 1172 if (error) { 1173 INP_INFO_WUNLOCK(&V_udbinfo); 1174 return (error); 1175 } 1176 1177 inp = (struct inpcb *)so->so_pcb; 1178 INP_INFO_WUNLOCK(&V_udbinfo); 1179 inp->inp_vflag |= INP_IPV4; 1180 inp->inp_ip_ttl = V_ip_defttl; 1181 /* 1182 * UDP does not have a per-protocol pcb (inp->inp_ppcb). 1183 * We use this pointer for kernel tunneling pointer. 1184 * If we ever need to have a protocol block we will 1185 * need to move this function pointer there. Null 1186 * in this pointer means "do the normal thing". 1187 */ 1188 inp->inp_ppcb = NULL; 1189 INP_WUNLOCK(inp); 1190 return (0); 1191 } 1192 1193 int 1194 udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f) 1195 { 1196 struct inpcb *inp; 1197 1198 inp = (struct inpcb *)so->so_pcb; 1199 KASSERT(so->so_type == SOCK_DGRAM, ("udp_set_kernel_tunneling: !dgram")); 1200 KASSERT(so->so_pcb != NULL, ("udp_set_kernel_tunneling: NULL inp")); 1201 if (so->so_type != SOCK_DGRAM) { 1202 /* Not UDP socket... sorry! */ 1203 return (ENOTSUP); 1204 } 1205 if (inp == NULL) { 1206 /* NULL INP? */ 1207 return (EINVAL); 1208 } 1209 INP_WLOCK(inp); 1210 if (inp->inp_ppcb != NULL) { 1211 INP_WUNLOCK(inp); 1212 return (EBUSY); 1213 } 1214 inp->inp_ppcb = f; 1215 INP_WUNLOCK(inp); 1216 return (0); 1217 } 1218 1219 static int 1220 udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td) 1221 { 1222 INIT_VNET_INET(so->so_vnet); 1223 struct inpcb *inp; 1224 int error; 1225 1226 inp = sotoinpcb(so); 1227 KASSERT(inp != NULL, ("udp_bind: inp == NULL")); 1228 INP_INFO_WLOCK(&V_udbinfo); 1229 INP_WLOCK(inp); 1230 error = in_pcbbind(inp, nam, td->td_ucred); 1231 INP_WUNLOCK(inp); 1232 INP_INFO_WUNLOCK(&V_udbinfo); 1233 return (error); 1234 } 1235 1236 static void 1237 udp_close(struct socket *so) 1238 { 1239 INIT_VNET_INET(so->so_vnet); 1240 struct inpcb *inp; 1241 1242 inp = sotoinpcb(so); 1243 KASSERT(inp != NULL, ("udp_close: inp == NULL")); 1244 INP_INFO_WLOCK(&V_udbinfo); 1245 INP_WLOCK(inp); 1246 if (inp->inp_faddr.s_addr != INADDR_ANY) { 1247 in_pcbdisconnect(inp); 1248 inp->inp_laddr.s_addr = INADDR_ANY; 1249 soisdisconnected(so); 1250 } 1251 INP_WUNLOCK(inp); 1252 INP_INFO_WUNLOCK(&V_udbinfo); 1253 } 1254 1255 static int 1256 udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) 1257 { 1258 INIT_VNET_INET(so->so_vnet); 1259 struct inpcb *inp; 1260 int error; 1261 struct sockaddr_in *sin; 1262 1263 inp = sotoinpcb(so); 1264 KASSERT(inp != NULL, ("udp_connect: inp == NULL")); 1265 INP_INFO_WLOCK(&V_udbinfo); 1266 INP_WLOCK(inp); 1267 if (inp->inp_faddr.s_addr != INADDR_ANY) { 1268 INP_WUNLOCK(inp); 1269 INP_INFO_WUNLOCK(&V_udbinfo); 1270 return (EISCONN); 1271 } 1272 sin = (struct sockaddr_in *)nam; 1273 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr); 1274 if (error != 0) { 1275 INP_WUNLOCK(inp); 1276 INP_INFO_WUNLOCK(&V_udbinfo); 1277 return (error); 1278 } 1279 error = in_pcbconnect(inp, nam, td->td_ucred); 1280 if (error == 0) 1281 soisconnected(so); 1282 INP_WUNLOCK(inp); 1283 INP_INFO_WUNLOCK(&V_udbinfo); 1284 return (error); 1285 } 1286 1287 static void 1288 udp_detach(struct socket *so) 1289 { 1290 INIT_VNET_INET(so->so_vnet); 1291 struct inpcb *inp; 1292 1293 inp = sotoinpcb(so); 1294 KASSERT(inp != NULL, ("udp_detach: inp == NULL")); 1295 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY, 1296 ("udp_detach: not disconnected")); 1297 INP_INFO_WLOCK(&V_udbinfo); 1298 INP_WLOCK(inp); 1299 in_pcbdetach(inp); 1300 in_pcbfree(inp); 1301 INP_INFO_WUNLOCK(&V_udbinfo); 1302 } 1303 1304 static int 1305 udp_disconnect(struct socket *so) 1306 { 1307 INIT_VNET_INET(so->so_vnet); 1308 struct inpcb *inp; 1309 1310 inp = sotoinpcb(so); 1311 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL")); 1312 INP_INFO_WLOCK(&V_udbinfo); 1313 INP_WLOCK(inp); 1314 if (inp->inp_faddr.s_addr == INADDR_ANY) { 1315 INP_WUNLOCK(inp); 1316 INP_INFO_WUNLOCK(&V_udbinfo); 1317 return (ENOTCONN); 1318 } 1319 1320 in_pcbdisconnect(inp); 1321 inp->inp_laddr.s_addr = INADDR_ANY; 1322 SOCK_LOCK(so); 1323 so->so_state &= ~SS_ISCONNECTED; /* XXX */ 1324 SOCK_UNLOCK(so); 1325 INP_WUNLOCK(inp); 1326 INP_INFO_WUNLOCK(&V_udbinfo); 1327 return (0); 1328 } 1329 1330 static int 1331 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, 1332 struct mbuf *control, struct thread *td) 1333 { 1334 struct inpcb *inp; 1335 1336 inp = sotoinpcb(so); 1337 KASSERT(inp != NULL, ("udp_send: inp == NULL")); 1338 return (udp_output(inp, m, addr, control, td)); 1339 } 1340 1341 int 1342 udp_shutdown(struct socket *so) 1343 { 1344 struct inpcb *inp; 1345 1346 inp = sotoinpcb(so); 1347 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL")); 1348 INP_WLOCK(inp); 1349 socantsendmore(so); 1350 INP_WUNLOCK(inp); 1351 return (0); 1352 } 1353 1354 struct pr_usrreqs udp_usrreqs = { 1355 .pru_abort = udp_abort, 1356 .pru_attach = udp_attach, 1357 .pru_bind = udp_bind, 1358 .pru_connect = udp_connect, 1359 .pru_control = in_control, 1360 .pru_detach = udp_detach, 1361 .pru_disconnect = udp_disconnect, 1362 .pru_peeraddr = in_getpeeraddr, 1363 .pru_send = udp_send, 1364 .pru_soreceive = soreceive_dgram, 1365 .pru_sosend = sosend_dgram, 1366 .pru_shutdown = udp_shutdown, 1367 .pru_sockaddr = in_getsockaddr, 1368 .pru_sosetlabel = in_pcbsosetlabel, 1369 .pru_close = udp_close, 1370 }; 1371