/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.
 * Copyright (c) 2008 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipfw.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/esp.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

/*
 * UDP protocol implementation.
 * Per RFC 768, August, 1980.
 */

/*
 * BSD 4.2 defaulted the UDP checksum to be off.  Turning off UDP checksums
 * removes the only data integrity check on the payload; malformed packets
 * that would otherwise be discarded due to a bad checksum are then accepted
 * and may cause problems (especially for NFS data blocks).
 */
static int	udp_cksum = 1;
SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW, &udp_cksum,
    0, "compute udp checksum");

int	udp_log_in_vain = 0;
SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &udp_log_in_vain, 0, "Log all incoming UDP packets");

VNET_DEFINE(int, udp_blackhole) = 0;
SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
    &VNET_NAME(udp_blackhole), 0,
    "Do not send port unreachables for refused connects");

u_long	udp_sendspace = 9216;		/* really max datagram size */
SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
    &udp_sendspace, 0, "Maximum outgoing UDP datagram size");

u_long	udp_recvspace = 40 * (1024 +	/* 40 1K datagrams */
#ifdef INET6
				      sizeof(struct sockaddr_in6)
#else
				      sizeof(struct sockaddr_in)
#endif
				      );

SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
    &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");

VNET_DEFINE(struct inpcbhead, udb);		/* from udp_var.h */
VNET_DEFINE(struct inpcbinfo, udbinfo);
static VNET_DEFINE(uma_zone_t, udpcb_zone);
#define	V_udpcb_zone			VNET(udpcb_zone)

#ifndef UDBHASHSIZE
#define	UDBHASHSIZE	128
#endif

VNET_DEFINE(struct udpstat, udpstat);		/* from udp_var.h */
SYSCTL_VNET_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RW,
    &VNET_NAME(udpstat), udpstat,
    "UDP statistics (struct udpstat, netinet/udp_var.h)");

#ifdef INET
static void	udp_detach(struct socket *so);
static int	udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
		    struct mbuf *, struct thread *);
#endif

#ifdef IPSEC
#ifdef IPSEC_NAT_T
#define	UF_ESPINUDP_ALL	(UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
#ifdef INET
static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
#endif
#endif /* IPSEC_NAT_T */
#endif /* IPSEC */

static void
udp_zone_change(void *tag)
{

	uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
	uma_zone_set_max(V_udpcb_zone, maxsockets);
}

static int
udp_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp;

	inp = mem;
	INP_LOCK_INIT(inp, "inp", "udpinp");
	return (0);
}

void
udp_init(void)
{

	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
	    "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE,
	    IPI_HASHFIELDS_2TUPLE);
	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(V_udpcb_zone, maxsockets);
	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Kernel module interface for updating udpstat.  The argument is an index
 * into udpstat treated as an array of u_long.  While this encodes the
 * general layout of udpstat into the caller, it doesn't encode its location,
 * so that future changes to add, for example, per-CPU stats support won't
 * cause binary compatibility problems for kernel modules.
 */
void
kmod_udpstat_inc(int statnum)
{

	(*((u_long *)&V_udpstat + statnum))++;
}
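
/*
 * Illustrative (hypothetical) caller: a loadable module that wanted to bump,
 * say, udps_fullsock would derive the index from the structure layout rather
 * than hard-coding it, e.g.:
 *
 *	kmod_udpstat_inc(offsetof(struct udpstat, udps_fullsock) /
 *	    sizeof(u_long));
 *
 * In practice callers normally go through a wrapper macro such as
 * KMOD_UDPSTAT_INC() in netinet/udp_var.h rather than computing the index by
 * hand.
 */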

int
udp_newudpcb(struct inpcb *inp)
{
	struct udpcb *up;

	up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
	if (up == NULL)
		return (ENOBUFS);
	inp->inp_ppcb = up;
	return (0);
}

void
udp_discardcb(struct udpcb *up)
{

	uma_zfree(V_udpcb_zone, up);
}

#ifdef VIMAGE
void
udp_destroy(void)
{

	in_pcbinfo_destroy(&V_udbinfo);
	uma_zdestroy(V_udpcb_zone);
}
#endif

#ifdef INET
/*
 * Subroutine of udp_input(): appends the provided mbuf chain to the passed
 * pcb/socket.  The caller must provide a sockaddr_in via udp_in that
 * contains the source address.  If the socket ends up being an IPv6 socket,
 * udp_append() will convert to a sockaddr_in6 before passing the address
 * into the socket code.
 */
static void
udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
    struct sockaddr_in *udp_in)
{
	struct sockaddr *append_sa;
	struct socket *so;
	struct mbuf *opts = 0;
#ifdef INET6
	struct sockaddr_in6 udp_in6;
#endif
	struct udpcb *up;

	INP_LOCK_ASSERT(inp);

	/*
	 * Engage the tunneling protocol.
	 */
	up = intoudpcb(inp);
	if (up->u_tun_func != NULL) {
		(*up->u_tun_func)(n, off, inp);
		return;
	}

	if (n == NULL)
		return;

	off += sizeof(struct udphdr);

#ifdef IPSEC
	/* Check AH/ESP integrity. */
	if (ipsec4_in_reject(n, inp)) {
		m_freem(n);
		V_ipsec4stat.in_polvio++;
		return;
	}
#ifdef IPSEC_NAT_T
	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
	if (up->u_flags & UF_ESPINUDP_ALL) {	/* IPSec UDP encaps. */
		n = udp4_espdecap(inp, n, off);
		if (n == NULL)			/* Consumed. */
			return;
	}
#endif /* IPSEC_NAT_T */
#endif /* IPSEC */
#ifdef MAC
	if (mac_inpcb_check_deliver(inp, n) != 0) {
		m_freem(n);
		return;
	}
#endif /* MAC */
	if (inp->inp_flags & INP_CONTROLOPTS ||
	    inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6)
			(void)ip6_savecontrol_v4(inp, n, &opts, NULL);
		else
#endif /* INET6 */
			ip_savecontrol(inp, &opts, ip, n);
	}
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6) {
		bzero(&udp_in6, sizeof(udp_in6));
		udp_in6.sin6_len = sizeof(udp_in6);
		udp_in6.sin6_family = AF_INET6;
		in6_sin_2_v4mapsin6(udp_in, &udp_in6);
		append_sa = (struct sockaddr *)&udp_in6;
	} else
#endif /* INET6 */
		append_sa = (struct sockaddr *)udp_in;
	m_adj(n, off);

	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(n);
		if (opts)
			m_freem(opts);
		UDPSTAT_INC(udps_fullsock);
	} else
		sorwakeup_locked(so);
}

void
udp_input(struct mbuf *m, int off)
{
	int iphlen = off;
	struct ip *ip;
	struct udphdr *uh;
	struct ifnet *ifp;
	struct inpcb *inp;
	int len;
	struct ip save_ip;
	struct sockaddr_in udp_in;
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif

	ifp = m->m_pkthdr.rcvif;
	UDPSTAT_INC(udps_ipackets);

	/*
	 * Strip IP options, if any; should skip this, make available to
	 * user, and use on returned packets, but we don't yet have a way to
	 * check the checksum with options still present.
	 */
	if (iphlen > sizeof (struct ip)) {
		ip_stripoptions(m, (struct mbuf *)0);
		iphlen = sizeof(struct ip);
	}

	/*
	 * Get IP and UDP header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	if (m->m_len < iphlen + sizeof(struct udphdr)) {
		if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
			UDPSTAT_INC(udps_hdrops);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	uh = (struct udphdr *)((caddr_t)ip + iphlen);

	/*
	 * Destination port of 0 is illegal, based on RFC 768.
	 */
	if (uh->uh_dport == 0)
		goto badunlocked;

	/*
	 * Construct sockaddr format source address.  Stuff source address
	 * and datagram in user buffer.
	 */
	bzero(&udp_in, sizeof(udp_in));
	udp_in.sin_len = sizeof(udp_in);
	udp_in.sin_family = AF_INET;
	udp_in.sin_port = uh->uh_sport;
	udp_in.sin_addr = ip->ip_src;

	/*
	 * Make mbuf data length reflect UDP length.  If not enough data to
	 * reflect UDP length, drop.
	 */
	len = ntohs((u_short)uh->uh_ulen);
	if (ip->ip_len != len) {
		if (len > ip->ip_len || len < sizeof(struct udphdr)) {
			UDPSTAT_INC(udps_badlen);
			goto badunlocked;
		}
		m_adj(m, len - ip->ip_len);
		/* ip->ip_len = len; */
	}

	/*
	 * Save a copy of the IP header in case we want to restore it for
	 * sending an ICMP error message in response.
	 */
	if (!V_udp_blackhole)
		save_ip = *ip;
	else
		memset(&save_ip, 0, sizeof(save_ip));

	/*
	 * Checksum extended UDP header and data.
	 */
	if (uh->uh_sum) {
		u_short uh_sum;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				uh_sum = m->m_pkthdr.csum_data;
			else
				uh_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + IPPROTO_UDP));
			uh_sum ^= 0xffff;
		} else {
			char b[9];

			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
			bzero(((struct ipovly *)ip)->ih_x1, 9);
			((struct ipovly *)ip)->ih_len = uh->uh_ulen;
			uh_sum = in_cksum(m, len + sizeof (struct ip));
			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
		}
		if (uh_sum) {
			UDPSTAT_INC(udps_badsum);
			m_freem(m);
			return;
		}
	} else
		UDPSTAT_INC(udps_nosum);

#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		/*
		 * Do the hack.
		 */
		next_hop = (struct sockaddr_in *)(fwd_tag + 1);
		ip->ip_dst = next_hop->sin_addr;
		uh->uh_dport = ntohs(next_hop->sin_port);

		/*
		 * Remove the tag from the packet.  We don't need it anymore.
		 */
		m_tag_delete(m, fwd_tag);
	}
#endif

	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
	    in_broadcast(ip->ip_dst, ifp)) {
		struct inpcb *last;
		struct ip_moptions *imo;

		INP_INFO_RLOCK(&V_udbinfo);
		last = NULL;
		LIST_FOREACH(inp, &V_udb, inp_list) {
			if (inp->inp_lport != uh->uh_dport)
				continue;
#ifdef INET6
			if ((inp->inp_vflag & INP_IPV4) == 0)
				continue;
#endif
			if (inp->inp_laddr.s_addr != INADDR_ANY &&
			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
				continue;
			if (inp->inp_faddr.s_addr != INADDR_ANY &&
			    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
				continue;
			if (inp->inp_fport != 0 &&
			    inp->inp_fport != uh->uh_sport)
				continue;

			INP_RLOCK(inp);

			/*
			 * XXXRW: Because we weren't holding either the inpcb
			 * or the hash lock when we checked for a match
			 * before, we should probably recheck now that the
			 * inpcb lock is held.
			 */

			/*
			 * Handle socket delivery policy for any-source
			 * and source-specific multicast. [RFC3678]
			 */
			imo = inp->inp_moptions;
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
				struct sockaddr_in group;
				int blocked;

				if (imo == NULL) {
					INP_RUNLOCK(inp);
					continue;
				}
				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(imo, ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&udp_in);
				if (blocked != MCAST_PASS) {
					if (blocked == MCAST_NOTGMEMBER)
						IPSTAT_INC(ips_notmember);
					if (blocked == MCAST_NOTSMEMBER ||
					    blocked == MCAST_MUTED)
						UDPSTAT_INC(udps_filtermcast);
					INP_RUNLOCK(inp);
					continue;
				}
			}
			if (last != NULL) {
				struct mbuf *n;

				n = m_copy(m, 0, M_COPYALL);
				udp_append(last, ip, n, iphlen, &udp_in);
				INP_RUNLOCK(last);
			}
			last = inp;
			/*
			 * Don't look for additional matches if this one does
			 * not have either the SO_REUSEPORT or SO_REUSEADDR
			 * socket options set.  This heuristic avoids
			 * searching through all pcbs in the common case of a
			 * non-shared port.  It assumes that an application
			 * will never clear these options after setting them.
			 */
			if ((last->inp_socket->so_options &
			    (SO_REUSEPORT|SO_REUSEADDR)) == 0)
				break;
		}

		if (last == NULL) {
			/*
			 * No matching pcb found; discard datagram.  (No need
			 * to send an ICMP Port Unreachable for a broadcast
			 * or multicast datagram.)
			 */
			UDPSTAT_INC(udps_noportbcast);
			if (inp)
				INP_RUNLOCK(inp);
			INP_INFO_RUNLOCK(&V_udbinfo);
			goto badunlocked;
		}
		udp_append(last, ip, m, iphlen, &udp_in);
		INP_RUNLOCK(last);
		INP_INFO_RUNLOCK(&V_udbinfo);
		return;
	}

	/*
	 * Locate pcb for datagram.
	 */
	inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
	    ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB,
	    ifp, m);
	if (inp == NULL) {
		if (udp_log_in_vain) {
			char buf[4*sizeof "123"];

			strcpy(buf, inet_ntoa(ip->ip_dst));
			log(LOG_INFO,
			    "Connection attempt to UDP %s:%d from %s:%d\n",
			    buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
			    ntohs(uh->uh_sport));
		}
		UDPSTAT_INC(udps_noport);
		if (m->m_flags & (M_BCAST | M_MCAST)) {
			UDPSTAT_INC(udps_noportbcast);
			goto badunlocked;
		}
		if (V_udp_blackhole)
			goto badunlocked;
		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
			goto badunlocked;
		*ip = save_ip;
		ip->ip_len += iphlen;
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
		return;
	}

	/*
	 * Check the minimum TTL for socket.
	 */
	INP_RLOCK_ASSERT(inp);
	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
		INP_RUNLOCK(inp);
		m_freem(m);
		return;
	}
	udp_append(inp, ip, m, iphlen, &udp_in);
	INP_RUNLOCK(inp);
	return;

badunlocked:
	m_freem(m);
}
#endif /* INET */

/*
 * Notify a udp user of an asynchronous error; just wake up so that they can
 * collect error status.
 */
struct inpcb *
udp_notify(struct inpcb *inp, int errno)
{

	/*
	 * While udp_ctlinput() always calls udp_notify() with a read lock
	 * when invoking it directly, in_pcbnotifyall() currently uses write
	 * locks due to sharing code with TCP.  For now, accept either a read
	 * or a write lock, but a read lock is sufficient.
	 */
	INP_LOCK_ASSERT(inp);

	inp->inp_socket->so_error = errno;
	sorwakeup(inp->inp_socket);
	sowwakeup(inp->inp_socket);
	return (inp);
}

#ifdef INET
void
udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct ip *ip = vip;
	struct udphdr *uh;
	struct in_addr faddr;
	struct inpcb *inp;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	/*
	 * Redirects don't need to be handled up here.
	 */
	if (PRC_IS_REDIRECT(cmd))
		return;

	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 *
	 * XXX: We never get this from ICMP, otherwise it makes an excellent
	 * DoS attack on machines with many connections.
	 */
	if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		inp = in_pcblookup(&V_udbinfo, faddr, uh->uh_dport,
		    ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
		if (inp != NULL) {
			INP_RLOCK_ASSERT(inp);
			if (inp->inp_socket != NULL) {
				udp_notify(inp, inetctlerrmap[cmd]);
			}
			INP_RUNLOCK(inp);
		}
	} else
		in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
		    udp_notify);
}
#endif /* INET */

static int
udp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_udbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_udbinfo);
	gencnt = V_udbinfo.ipi_gencnt;
	n = V_udbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_udbinfo);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ n * sizeof(struct xinpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&V_udbinfo);
	for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_udbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			xi.xi_inp.inp_gencnt = inp->inp_gencnt;
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_udbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_udbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_udbinfo);
		xig.xig_gen = V_udbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_udbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_udbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    udp_pcblist, "S,xinpcb", "List of active UDP sockets");

#ifdef INET
static int
udp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_NETINET_GETCRED);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port,
	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
	if (inp != NULL) {
		INP_RLOCK_ASSERT(inp);
		if (inp->inp_socket == NULL)
			error = ENOENT;
		if (error == 0)
			error = cr_canseeinpcb(req->td->td_ucred, inp);
		if (error == 0)
			cru2x(inp->inp_cred, &xuc);
		INP_RUNLOCK(inp);
	} else
		error = ENOENT;
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
#endif /* INET */

int
udp_ctloutput(struct socket *so, struct sockopt *sopt)
{
	int error = 0, optval;
	struct inpcb *inp;
#ifdef IPSEC_NAT_T
	struct udpcb *up;
#endif

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
	INP_WLOCK(inp);
	if (sopt->sopt_level != IPPROTO_UDP) {
#ifdef INET6
		if (INP_CHECK_SOCKAF(so, AF_INET6)) {
			INP_WUNLOCK(inp);
			error = ip6_ctloutput(so, sopt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			INP_WUNLOCK(inp);
			error = ip_ctloutput(so, sopt);
		}
#endif
		return (error);
	}

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case UDP_ENCAP:
			INP_WUNLOCK(inp);
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			inp = sotoinpcb(so);
			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
			INP_WLOCK(inp);
#ifdef IPSEC_NAT_T
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
#endif
			switch (optval) {
			case 0:
				/* Clear all UDP encap. */
#ifdef IPSEC_NAT_T
				up->u_flags &= ~UF_ESPINUDP_ALL;
#endif
				break;
#ifdef IPSEC_NAT_T
			case UDP_ENCAP_ESPINUDP:
			case UDP_ENCAP_ESPINUDP_NON_IKE:
				up->u_flags &= ~UF_ESPINUDP_ALL;
				if (optval == UDP_ENCAP_ESPINUDP)
					up->u_flags |= UF_ESPINUDP;
				else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
					up->u_flags |= UF_ESPINUDP_NON_IKE;
				break;
#endif
			default:
				error = EINVAL;
				break;
			}
			INP_WUNLOCK(inp);
			break;
		default:
			INP_WUNLOCK(inp);
			error = ENOPROTOOPT;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
#ifdef IPSEC_NAT_T
		case UDP_ENCAP:
			up = intoudpcb(inp);
			KASSERT(up != NULL, ("%s: up == NULL", __func__));
			optval = up->u_flags & UF_ESPINUDP_ALL;
			INP_WUNLOCK(inp);
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;
#endif
		default:
			INP_WUNLOCK(inp);
			error = ENOPROTOOPT;
			break;
		}
		break;
	}
	return (error);
}

#ifdef INET
#define	UH_WLOCKED	2
#define	UH_RLOCKED	1
#define	UH_UNLOCKED	0
static int
udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *td)
{
	struct udpiphdr *ui;
	int len = m->m_pkthdr.len;
	struct in_addr faddr, laddr;
	struct cmsghdr *cm;
	struct sockaddr_in *sin, src;
	int error = 0;
	int ipflags;
	u_short fport, lport;
	int unlock_udbinfo;

	/*
	 * udp_output() may need to temporarily bind or connect the current
	 * inpcb.  As such, we don't know up front whether we will need the
	 * pcbinfo lock or not.  Do any work to decide what is needed up
	 * front before acquiring any locks.
	 */
	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
		if (control)
			m_freem(control);
		m_freem(m);
		return (EMSGSIZE);
	}

	src.sin_family = 0;
	if (control != NULL) {
		/*
		 * XXX: Currently, we assume all the optional information is
		 * stored in a single mbuf.
		 */
		if (control->m_next) {
			m_freem(control);
			m_freem(m);
			return (EINVAL);
		}
		for (; control->m_len > 0;
		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
			cm = mtod(control, struct cmsghdr *);
			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
			    || cm->cmsg_len > control->m_len) {
				error = EINVAL;
				break;
			}
			if (cm->cmsg_level != IPPROTO_IP)
				continue;

			switch (cm->cmsg_type) {
			case IP_SENDSRCADDR:
				if (cm->cmsg_len !=
				    CMSG_LEN(sizeof(struct in_addr))) {
					error = EINVAL;
					break;
				}
				bzero(&src, sizeof(src));
				src.sin_family = AF_INET;
				src.sin_len = sizeof(src);
				src.sin_port = inp->inp_lport;
				src.sin_addr =
				    *(struct in_addr *)CMSG_DATA(cm);
				break;

			default:
				error = ENOPROTOOPT;
				break;
			}
			if (error)
				break;
		}
		m_freem(control);
	}
	if (error) {
		m_freem(m);
		return (error);
	}

	/*
	 * Depending on whether or not the application has bound or connected
	 * the socket, we may have to do varying levels of work.  The optimal
	 * case is for a connected UDP socket, as a global lock isn't
	 * required at all.
	 *
	 * In order to decide which we need, we require stability of the
	 * inpcb binding, which we ensure by acquiring a read lock on the
	 * inpcb.  This doesn't strictly follow the lock order, so we play
	 * the trylock and retry game; note that we may end up with more
	 * conservative locks than required the second time around, so later
	 * assertions have to accept that.  Further analysis of the number of
	 * misses under contention is required.
	 *
	 * XXXRW: Check that hash locking update here is correct.
	 */
	sin = (struct sockaddr_in *)addr;
	INP_RLOCK(inp);
	if (sin != NULL &&
	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
		INP_RUNLOCK(inp);
		INP_WLOCK(inp);
		INP_HASH_WLOCK(&V_udbinfo);
		unlock_udbinfo = UH_WLOCKED;
	} else if ((sin != NULL && (
	    (sin->sin_addr.s_addr == INADDR_ANY) ||
	    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
	    (inp->inp_laddr.s_addr == INADDR_ANY) ||
	    (inp->inp_lport == 0))) ||
	    (src.sin_family == AF_INET)) {
		INP_HASH_RLOCK(&V_udbinfo);
		unlock_udbinfo = UH_RLOCKED;
	} else
		unlock_udbinfo = UH_UNLOCKED;

	/*
	 * If the IP_SENDSRCADDR control message was specified, override the
	 * source address for this datagram.  Its use is invalidated if the
	 * address thus specified is incomplete or clobbers other inpcbs.
	 */
	laddr = inp->inp_laddr;
	lport = inp->inp_lport;
	if (src.sin_family == AF_INET) {
		INP_HASH_LOCK_ASSERT(&V_udbinfo);
		if ((lport == 0) ||
		    (laddr.s_addr == INADDR_ANY &&
		     src.sin_addr.s_addr == INADDR_ANY)) {
			error = EINVAL;
			goto release;
		}
		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
		    &laddr.s_addr, &lport, td->td_ucred);
		if (error)
			goto release;
	}

	/*
	 * If a UDP socket has been connected, then a local address/port will
	 * have been selected and bound.
	 *
	 * If a UDP socket has not been connected to, then an explicit
	 * destination address must be used, in which case a local
	 * address/port may not have been selected and bound.
	 */
	if (sin != NULL) {
		INP_LOCK_ASSERT(inp);
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			error = EISCONN;
			goto release;
		}

		/*
		 * Jail may rewrite the destination address, so let it do
		 * that before we use it.
		 */
		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
		if (error)
			goto release;

		/*
		 * If a local address or port hasn't yet been selected, or if
		 * the destination address needs to be rewritten due to using
		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
		 * to do the heavy lifting.  Once a port is selected, we
		 * commit the binding back to the socket; we also commit the
		 * binding of the address if in jail.
		 *
		 * If we already have a valid binding and we're not
		 * requesting a destination address rewrite, use a fast path.
		 */
		if (inp->inp_laddr.s_addr == INADDR_ANY ||
		    inp->inp_lport == 0 ||
		    sin->sin_addr.s_addr == INADDR_ANY ||
		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
			INP_HASH_LOCK_ASSERT(&V_udbinfo);
			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
			    &lport, &faddr.s_addr, &fport, NULL,
			    td->td_ucred);
			if (error)
				goto release;

			/*
			 * XXXRW: Why not commit the port if the address is
			 * !INADDR_ANY?
			 */
			/* Commit the local port if newly assigned. */
			if (inp->inp_laddr.s_addr == INADDR_ANY &&
			    inp->inp_lport == 0) {
				INP_WLOCK_ASSERT(inp);
				INP_HASH_WLOCK_ASSERT(&V_udbinfo);
				/*
				 * Remember addr if jailed, to prevent
				 * rebinding.
				 */
				if (prison_flag(td->td_ucred, PR_IP4))
					inp->inp_laddr = laddr;
				inp->inp_lport = lport;
				if (in_pcbinshash(inp) != 0) {
					inp->inp_lport = 0;
					error = EAGAIN;
					goto release;
				}
				inp->inp_flags |= INP_ANONPORT;
			}
		} else {
			faddr = sin->sin_addr;
			fport = sin->sin_port;
		}
	} else {
		INP_LOCK_ASSERT(inp);
		faddr = inp->inp_faddr;
		fport = inp->inp_fport;
		if (faddr.s_addr == INADDR_ANY) {
			error = ENOTCONN;
			goto release;
		}
	}

	/*
	 * Calculate data length and get an mbuf for UDP, IP, and possible
	 * link-layer headers.  Immediately slide the data pointer back
	 * forward, since we won't use that space at this layer.
	 */
	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto release;
	}
	m->m_data += max_linkhdr;
	m->m_len -= max_linkhdr;
	m->m_pkthdr.len -= max_linkhdr;

	/*
	 * Fill in mbuf with extended UDP header and addresses and length put
	 * into network format.
	 */
	ui = mtod(m, struct udpiphdr *);
	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
	ui->ui_pr = IPPROTO_UDP;
	ui->ui_src = laddr;
	ui->ui_dst = faddr;
	ui->ui_sport = lport;
	ui->ui_dport = fport;
	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));

	/*
	 * Set the Don't Fragment bit in the IP header.
	 */
	if (inp->inp_flags & INP_DONTFRAG) {
		struct ip *ip;

		ip = (struct ip *)&ui->ui_i;
		ip->ip_off |= IP_DF;
	}

	ipflags = 0;
	if (inp->inp_socket->so_options & SO_DONTROUTE)
		ipflags |= IP_ROUTETOIF;
	if (inp->inp_socket->so_options & SO_BROADCAST)
		ipflags |= IP_ALLOWBROADCAST;
	if (inp->inp_flags & INP_ONESBCAST)
		ipflags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	/*
	 * Set up checksum and output datagram.
	 */
	if (udp_cksum) {
		if (inp->inp_flags & INP_ONESBCAST)
			faddr.s_addr = INADDR_BROADCAST;
		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
		    htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
		m->m_pkthdr.csum_flags = CSUM_UDP;
		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
	} else
		ui->ui_sum = 0;
	((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
	((struct ip *)ui)->ip_tos = inp->inp_ip_tos;	/* XXX */
	UDPSTAT_INC(udps_opackets);

	if (unlock_udbinfo == UH_WLOCKED)
		INP_HASH_WUNLOCK(&V_udbinfo);
	else if (unlock_udbinfo == UH_RLOCKED)
		INP_HASH_RUNLOCK(&V_udbinfo);
	error = ip_output(m, inp->inp_options, NULL, ipflags,
	    inp->inp_moptions, inp);
	if (unlock_udbinfo == UH_WLOCKED)
		INP_WUNLOCK(inp);
	else
		INP_RUNLOCK(inp);
	return (error);

release:
	if (unlock_udbinfo == UH_WLOCKED) {
		INP_HASH_WUNLOCK(&V_udbinfo);
		INP_WUNLOCK(inp);
	} else if (unlock_udbinfo == UH_RLOCKED) {
		INP_HASH_RUNLOCK(&V_udbinfo);
		INP_RUNLOCK(inp);
	} else
		INP_RUNLOCK(inp);
	m_freem(m);
	return (error);
}


#if defined(IPSEC) && defined(IPSEC_NAT_T)
/*
 * Potentially decap ESP in UDP frame.  Check for an ESP header
 * and optional marker; if present, strip the UDP header and
 * push the result through IPSec.
 *
 * Returns mbuf to be processed (potentially re-allocated) or
 * NULL if consumed and/or processed.
 */
static struct mbuf *
udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
{
	size_t minlen, payload, skip, iphlen;
	caddr_t data;
	struct udpcb *up;
	struct m_tag *tag;
	struct udphdr *udphdr;
	struct ip *ip;

	INP_RLOCK_ASSERT(inp);

	/*
	 * Pull up data so the longest case is contiguous:
	 *	IP/UDP hdr + non ESP marker + ESP hdr.
	 */
	minlen = off + sizeof(uint64_t) + sizeof(struct esp);
	if (minlen > m->m_pkthdr.len)
		minlen = m->m_pkthdr.len;
	if ((m = m_pullup(m, minlen)) == NULL) {
		V_ipsec4stat.in_inval++;
		return (NULL);		/* Bypass caller processing. */
	}
	data = mtod(m, caddr_t);	/* Points to ip header. */
	payload = m->m_len - off;	/* Size of payload. */

	if (payload == 1 && data[off] == '\xff')
		return (m);		/* NB: keepalive packet, no decap. */

	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
	    ("u_flags 0x%x", up->u_flags));

	/*
	 * Check that the payload is large enough to hold an
	 * ESP header and compute the amount of data to remove.
	 *
	 * NB: the caller has already done a pullup for us.
	 * XXX can we assume alignment and eliminate bcopys?
	 */
	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
		/*
		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
		 * possible AH mode non-IKE marker+non-ESP marker
		 * from draft-ietf-ipsec-udp-encaps-00.txt.
		 */
		uint64_t marker;

		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
			return (m);	/* NB: no decap. */
		bcopy(data + off, &marker, sizeof(uint64_t));
		if (marker != 0)	/* Non-IKE marker. */
			return (m);	/* NB: no decap. */
		skip = sizeof(uint64_t) + sizeof(struct udphdr);
	} else {
		uint32_t spi;

		if (payload <= sizeof(struct esp)) {
			V_ipsec4stat.in_inval++;
			m_freem(m);
			return (NULL);	/* Discard. */
		}
		bcopy(data + off, &spi, sizeof(uint32_t));
		if (spi == 0)		/* Non-ESP marker. */
			return (m);	/* NB: no decap. */
		skip = sizeof(struct udphdr);
	}

	/*
	 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
	 * the UDP ports.  This is required if we want to select
	 * the right SPD for multiple hosts behind same NAT.
	 *
	 * NB: ports are maintained in network byte order everywhere
	 *     in the NAT-T code.
	 */
	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
	    2 * sizeof(uint16_t), M_NOWAIT);
	if (tag == NULL) {
		V_ipsec4stat.in_nomem++;
		m_freem(m);
		return (NULL);		/* Discard. */
	}
	iphlen = off - sizeof(struct udphdr);
	udphdr = (struct udphdr *)(data + iphlen);
	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
	m_tag_prepend(m, tag);

	/*
	 * Remove the UDP header (and possibly the non ESP marker).
	 * IP header length is iphlen.
	 *
	 * Before:
	 *   <--- off --->
	 *   +----+------+-----+
	 *   | IP |  UDP | ESP |
	 *   +----+------+-----+
	 *        <-skip->
	 * After:
	 *          +----+-----+
	 *          | IP | ESP |
	 *          +----+-----+
	 *   <-skip->
	 */
	ovbcopy(data, data + skip, iphlen);
	m_adj(m, skip);

	ip = mtod(m, struct ip *);
	ip->ip_len -= skip;
	ip->ip_p = IPPROTO_ESP;

	/*
	 * We cannot yet update the cksums so clear any
	 * h/w cksum flags as they are no longer valid.
	 */
	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);

	(void) ipsec4_common_input(m, iphlen, ip->ip_p);
	return (NULL);			/* NB: consumed, bypass processing. */
}
#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */

static void
udp_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_HASH_WLOCK(&V_udbinfo);
		in_pcbdisconnect(inp);
		inp->inp_laddr.s_addr = INADDR_ANY;
		INP_HASH_WUNLOCK(&V_udbinfo);
		soisdisconnected(so);
	}
	INP_WUNLOCK(inp);
}

static int
udp_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
	error = soreserve(so, udp_sendspace, udp_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_udbinfo);
	error = in_pcballoc(so, &V_udbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_udbinfo);
		return (error);
	}

	inp = sotoinpcb(so);
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_ttl = V_ip_defttl;

	error = udp_newudpcb(inp);
	if (error) {
		in_pcbdetach(inp);
		in_pcbfree(inp);
		INP_INFO_WUNLOCK(&V_udbinfo);
		return (error);
	}

	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_udbinfo);
	return (0);
}
#endif /* INET */

int
udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
{
	struct inpcb *inp;
	struct udpcb *up;

	KASSERT(so->so_type == SOCK_DGRAM,
	    ("udp_set_kernel_tunneling: !dgram"));
	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
	INP_WLOCK(inp);
	up = intoudpcb(inp);
	if (up->u_tun_func != NULL) {
		INP_WUNLOCK(inp);
		return (EBUSY);
	}
	up->u_tun_func = f;
	INP_WUNLOCK(inp);
	return (0);
}

#ifdef INET
static int
udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
	INP_WLOCK(inp);
	INP_HASH_WLOCK(&V_udbinfo);
	error = in_pcbbind(inp, nam, td->td_ucred);
	INP_HASH_WUNLOCK(&V_udbinfo);
	INP_WUNLOCK(inp);
	return (error);
}

static void
udp_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_close: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_HASH_WLOCK(&V_udbinfo);
		in_pcbdisconnect(inp);
		inp->inp_laddr.s_addr = INADDR_ANY;
		INP_HASH_WUNLOCK(&V_udbinfo);
		soisdisconnected(so);
	}
	INP_WUNLOCK(inp);
}

static int
udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp;
	int error;
	struct sockaddr_in *sin;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr != INADDR_ANY) {
		INP_WUNLOCK(inp);
		return (EISCONN);
	}
	sin = (struct sockaddr_in *)nam;
	error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
	if (error != 0) {
		INP_WUNLOCK(inp);
		return (error);
	}
	INP_HASH_WLOCK(&V_udbinfo);
	error = in_pcbconnect(inp, nam, td->td_ucred);
	INP_HASH_WUNLOCK(&V_udbinfo);
	if (error == 0)
		soisconnected(so);
	INP_WUNLOCK(inp);
	return (error);
}

static void
udp_detach(struct socket *so)
{
	struct inpcb *inp;
	struct udpcb *up;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("udp_detach: not disconnected"));
	INP_INFO_WLOCK(&V_udbinfo);
	INP_WLOCK(inp);
	up = intoudpcb(inp);
	KASSERT(up != NULL, ("%s: up == NULL", __func__));
	inp->inp_ppcb = NULL;
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_udbinfo);
	udp_discardcb(up);
}

static int
udp_disconnect(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_faddr.s_addr == INADDR_ANY) {
		INP_WUNLOCK(inp);
		return (ENOTCONN);
	}
	INP_HASH_WLOCK(&V_udbinfo);
	in_pcbdisconnect(inp);
	inp->inp_laddr.s_addr = INADDR_ANY;
	INP_HASH_WUNLOCK(&V_udbinfo);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;		/* XXX */
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_send: inp == NULL"));
	return (udp_output(inp, m, addr, control, td));
}
#endif /* INET */

int
udp_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

#ifdef INET
struct pr_usrreqs udp_usrreqs = {
	.pru_abort =		udp_abort,
	.pru_attach =		udp_attach,
	.pru_bind =		udp_bind,
	.pru_connect =		udp_connect,
	.pru_control =		in_control,
	.pru_detach =		udp_detach,
	.pru_disconnect =	udp_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		udp_send,
	.pru_soreceive =	soreceive_dgram,
	.pru_sosend =		sosend_dgram,
	.pru_shutdown =		udp_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		udp_close,
};
#endif /* INET */
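
/*
 * How this file is wired up (informational): the entry points above are
 * attached to the IPv4 protocol switch through the IPPROTO_UDP entry of
 * inetsw[] (see netinet/in_proto.c), which is expected to point pr_input at
 * udp_input(), pr_ctlinput at udp_ctlinput(), pr_ctloutput at
 * udp_ctloutput(), and pr_usrreqs at the udp_usrreqs table above.
 * soreceive_dgram() and sosend_dgram() are the datagram-optimized socket
 * I/O routines from the generic socket layer.
 */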