1 /*- 2 * Copyright (c) 1982, 1986, 1991, 1993, 1995 3 * The Regents of the University of California. 4 * Copyright (c) 2007-2009 Robert N. M. Watson 5 * Copyright (c) 2010-2011 Juniper Networks, Inc. 6 * All rights reserved. 7 * 8 * Portions of this software were developed by Robert N. M. Watson under 9 * contract to Juniper Networks, Inc. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)in_pcb.c 8.4 (Berkeley) 5/24/95 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include "opt_ddb.h" 42 #include "opt_ipsec.h" 43 #include "opt_inet.h" 44 #include "opt_inet6.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/callout.h> 51 #include <sys/domain.h> 52 #include <sys/protosw.h> 53 #include <sys/socket.h> 54 #include <sys/socketvar.h> 55 #include <sys/priv.h> 56 #include <sys/proc.h> 57 #include <sys/refcount.h> 58 #include <sys/jail.h> 59 #include <sys/kernel.h> 60 #include <sys/sysctl.h> 61 62 #ifdef DDB 63 #include <ddb/ddb.h> 64 #endif 65 66 #include <vm/uma.h> 67 68 #include <net/if.h> 69 #include <net/if_types.h> 70 #include <net/route.h> 71 #include <net/vnet.h> 72 73 #if defined(INET) || defined(INET6) 74 #include <netinet/in.h> 75 #include <netinet/in_pcb.h> 76 #include <netinet/ip_var.h> 77 #include <netinet/tcp_var.h> 78 #include <netinet/udp.h> 79 #include <netinet/udp_var.h> 80 #endif 81 #ifdef INET 82 #include <netinet/in_var.h> 83 #endif 84 #ifdef INET6 85 #include <netinet/ip6.h> 86 #include <netinet6/in6_pcb.h> 87 #include <netinet6/in6_var.h> 88 #include <netinet6/ip6_var.h> 89 #endif /* INET6 */ 90 91 92 #ifdef IPSEC 93 #include <netipsec/ipsec.h> 94 #include <netipsec/key.h> 95 #endif /* IPSEC */ 96 97 #include <security/mac/mac_framework.h> 98 99 static struct callout ipport_tick_callout; 100 101 /* 102 * These configure the range of local port addresses assigned to 103 * "unspecified" outgoing connections/packets/whatever. 
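 *
 * Three ranges are kept per-vnet and exported under the
 * net.inet.ip.portrange sysctl node defined below: the default
 * ephemeral range (first/last), a "high" range (hifirst/hilast)
 * selected with INP_HIGHPORT, and a privileged "low" range
 * (lowfirst/lowlast) selected with INP_LOWPORT; see in_pcb_lport()
 * for how they are consulted.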
104 */ 105 VNET_DEFINE(int, ipport_lowfirstauto) = IPPORT_RESERVED - 1; /* 1023 */ 106 VNET_DEFINE(int, ipport_lowlastauto) = IPPORT_RESERVEDSTART; /* 600 */ 107 VNET_DEFINE(int, ipport_firstauto) = IPPORT_EPHEMERALFIRST; /* 10000 */ 108 VNET_DEFINE(int, ipport_lastauto) = IPPORT_EPHEMERALLAST; /* 65535 */ 109 VNET_DEFINE(int, ipport_hifirstauto) = IPPORT_HIFIRSTAUTO; /* 49152 */ 110 VNET_DEFINE(int, ipport_hilastauto) = IPPORT_HILASTAUTO; /* 65535 */ 111 112 /* 113 * Reserved ports accessible only to root. There are significant 114 * security considerations that must be accounted for when changing these, 115 * but the security benefits can be great. Please be careful. 116 */ 117 VNET_DEFINE(int, ipport_reservedhigh) = IPPORT_RESERVED - 1; /* 1023 */ 118 VNET_DEFINE(int, ipport_reservedlow); 119 120 /* Variables dealing with random ephemeral port allocation. */ 121 VNET_DEFINE(int, ipport_randomized) = 1; /* user controlled via sysctl */ 122 VNET_DEFINE(int, ipport_randomcps) = 10; /* user controlled via sysctl */ 123 VNET_DEFINE(int, ipport_randomtime) = 45; /* user controlled via sysctl */ 124 VNET_DEFINE(int, ipport_stoprandom); /* toggled by ipport_tick */ 125 VNET_DEFINE(int, ipport_tcpallocs); 126 static VNET_DEFINE(int, ipport_tcplastcount); 127 128 #define V_ipport_tcplastcount VNET(ipport_tcplastcount) 129 130 static void in_pcbremlists(struct inpcb *inp); 131 #ifdef INET 132 static struct inpcb *in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, 133 struct in_addr faddr, u_int fport_arg, 134 struct in_addr laddr, u_int lport_arg, 135 int lookupflags, struct ifnet *ifp); 136 137 #define RANGECHK(var, min, max) \ 138 if ((var) < (min)) { (var) = (min); } \ 139 else if ((var) > (max)) { (var) = (max); } 140 141 static int 142 sysctl_net_ipport_check(SYSCTL_HANDLER_ARGS) 143 { 144 int error; 145 146 #ifdef VIMAGE 147 error = vnet_sysctl_handle_int(oidp, arg1, arg2, req); 148 #else 149 error = sysctl_handle_int(oidp, arg1, arg2, req); 150 #endif 151 if (error == 0) { 152 RANGECHK(V_ipport_lowfirstauto, 1, IPPORT_RESERVED - 1); 153 RANGECHK(V_ipport_lowlastauto, 1, IPPORT_RESERVED - 1); 154 RANGECHK(V_ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX); 155 RANGECHK(V_ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX); 156 RANGECHK(V_ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX); 157 RANGECHK(V_ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX); 158 } 159 return (error); 160 } 161 162 #undef RANGECHK 163 164 SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW, 0, "IP Ports"); 165 166 SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst, 167 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowfirstauto), 0, 168 &sysctl_net_ipport_check, "I", ""); 169 SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast, 170 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowlastauto), 0, 171 &sysctl_net_ipport_check, "I", ""); 172 SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, first, 173 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_firstauto), 0, 174 &sysctl_net_ipport_check, "I", ""); 175 SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, last, 176 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lastauto), 0, 177 &sysctl_net_ipport_check, "I", ""); 178 SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst, 179 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hifirstauto), 0, 180 &sysctl_net_ipport_check, "I", ""); 181 SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hilast, 182 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hilastauto), 0, 183 &sysctl_net_ipport_check, "I", ""); 184 
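/*
 * Example (a sketch, not part of this file): the ranges and randomization
 * knobs defined above appear under net.inet.ip.portrange, so an
 * administrator could pin the ephemeral range and disable randomization
 * with sysctl(8):
 *
 *	sysctl net.inet.ip.portrange.first=49152
 *	sysctl net.inet.ip.portrange.last=65535
 *	sysctl net.inet.ip.portrange.randomized=0
 *
 * sysctl_net_ipport_check() above clamps submitted values so that the
 * low ranges stay below IPPORT_RESERVED and the default/high ranges stay
 * within IPPORT_RESERVED..IPPORT_MAX.
 */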
SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedhigh,
    CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedhigh), 0, "");
SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedlow,
    CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedlow), 0, "");
SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomized, CTLFLAG_RW,
    &VNET_NAME(ipport_randomized), 0, "Enable random port allocation");
SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomcps, CTLFLAG_RW,
    &VNET_NAME(ipport_randomcps), 0, "Maximum number of random port "
    "allocations before switching to a sequential one");
SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomtime, CTLFLAG_RW,
    &VNET_NAME(ipport_randomtime), 0,
    "Minimum time to keep sequential port "
    "allocation before switching to a random one");
#endif

/*
 * in_pcb.c: manage the Protocol Control Blocks.
 *
 * NOTE: It is assumed that most of these functions will be called with
 * the pcbinfo lock held, and often, the inpcb lock held, as these utility
 * functions often modify hash chains or addresses in pcbs.
 */

/*
 * Initialize an inpcbinfo -- we should be able to reduce the number of
 * arguments in time.
 */
void
in_pcbinfo_init(struct inpcbinfo *pcbinfo, const char *name,
    struct inpcbhead *listhead, int hash_nelements, int porthash_nelements,
    char *inpcbzone_name, uma_init inpcbzone_init, uma_fini inpcbzone_fini,
    uint32_t inpcbzone_flags)
{

	INP_INFO_LOCK_INIT(pcbinfo, name);
	INP_HASH_LOCK_INIT(pcbinfo, "pcbinfohash");	/* XXXRW: argument? */
#ifdef VIMAGE
	pcbinfo->ipi_vnet = curvnet;
#endif
	pcbinfo->ipi_listhead = listhead;
	LIST_INIT(pcbinfo->ipi_listhead);
	pcbinfo->ipi_count = 0;
	pcbinfo->ipi_hashbase = hashinit(hash_nelements, M_PCB,
	    &pcbinfo->ipi_hashmask);
	pcbinfo->ipi_porthashbase = hashinit(porthash_nelements, M_PCB,
	    &pcbinfo->ipi_porthashmask);
	pcbinfo->ipi_zone = uma_zcreate(inpcbzone_name, sizeof(struct inpcb),
	    NULL, NULL, inpcbzone_init, inpcbzone_fini, UMA_ALIGN_PTR,
	    inpcbzone_flags);
	uma_zone_set_max(pcbinfo->ipi_zone, maxsockets);
}

/*
 * Destroy an inpcbinfo.
 */
void
in_pcbinfo_destroy(struct inpcbinfo *pcbinfo)
{

	KASSERT(pcbinfo->ipi_count == 0,
	    ("%s: ipi_count = %u", __func__, pcbinfo->ipi_count));

	hashdestroy(pcbinfo->ipi_hashbase, M_PCB, pcbinfo->ipi_hashmask);
	hashdestroy(pcbinfo->ipi_porthashbase, M_PCB,
	    pcbinfo->ipi_porthashmask);
	uma_zdestroy(pcbinfo->ipi_zone);
	INP_HASH_LOCK_DESTROY(pcbinfo);
	INP_INFO_LOCK_DESTROY(pcbinfo);
}

/*
 * Allocate a PCB and associate it with the socket.
 * On success return with the PCB locked.
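 * The caller must hold the pcbinfo write lock (see the
 * INP_INFO_WLOCK_ASSERT() below); the new inpcb is put on the global
 * inpcb list and starts out with a single reference owned by the
 * inpcbinfo.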
257 */ 258 int 259 in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo) 260 { 261 struct inpcb *inp; 262 int error; 263 264 INP_INFO_WLOCK_ASSERT(pcbinfo); 265 error = 0; 266 inp = uma_zalloc(pcbinfo->ipi_zone, M_NOWAIT); 267 if (inp == NULL) 268 return (ENOBUFS); 269 bzero(inp, inp_zero_size); 270 inp->inp_pcbinfo = pcbinfo; 271 inp->inp_socket = so; 272 inp->inp_cred = crhold(so->so_cred); 273 inp->inp_inc.inc_fibnum = so->so_fibnum; 274 #ifdef MAC 275 error = mac_inpcb_init(inp, M_NOWAIT); 276 if (error != 0) 277 goto out; 278 mac_inpcb_create(so, inp); 279 #endif 280 #ifdef IPSEC 281 error = ipsec_init_policy(so, &inp->inp_sp); 282 if (error != 0) { 283 #ifdef MAC 284 mac_inpcb_destroy(inp); 285 #endif 286 goto out; 287 } 288 #endif /*IPSEC*/ 289 #ifdef INET6 290 if (INP_SOCKAF(so) == AF_INET6) { 291 inp->inp_vflag |= INP_IPV6PROTO; 292 if (V_ip6_v6only) 293 inp->inp_flags |= IN6P_IPV6_V6ONLY; 294 } 295 #endif 296 LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list); 297 pcbinfo->ipi_count++; 298 so->so_pcb = (caddr_t)inp; 299 #ifdef INET6 300 if (V_ip6_auto_flowlabel) 301 inp->inp_flags |= IN6P_AUTOFLOWLABEL; 302 #endif 303 INP_WLOCK(inp); 304 inp->inp_gencnt = ++pcbinfo->ipi_gencnt; 305 refcount_init(&inp->inp_refcount, 1); /* Reference from inpcbinfo */ 306 #if defined(IPSEC) || defined(MAC) 307 out: 308 if (error != 0) { 309 crfree(inp->inp_cred); 310 uma_zfree(pcbinfo->ipi_zone, inp); 311 } 312 #endif 313 return (error); 314 } 315 316 #ifdef INET 317 int 318 in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred) 319 { 320 int anonport, error; 321 322 INP_WLOCK_ASSERT(inp); 323 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); 324 325 if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) 326 return (EINVAL); 327 anonport = inp->inp_lport == 0 && (nam == NULL || 328 ((struct sockaddr_in *)nam)->sin_port == 0); 329 error = in_pcbbind_setup(inp, nam, &inp->inp_laddr.s_addr, 330 &inp->inp_lport, cred); 331 if (error) 332 return (error); 333 if (in_pcbinshash(inp) != 0) { 334 inp->inp_laddr.s_addr = INADDR_ANY; 335 inp->inp_lport = 0; 336 return (EAGAIN); 337 } 338 if (anonport) 339 inp->inp_flags |= INP_ANONPORT; 340 return (0); 341 } 342 #endif 343 344 #if defined(INET) || defined(INET6) 345 int 346 in_pcb_lport(struct inpcb *inp, struct in_addr *laddrp, u_short *lportp, 347 struct ucred *cred, int lookupflags) 348 { 349 struct inpcbinfo *pcbinfo; 350 struct inpcb *tmpinp; 351 unsigned short *lastport; 352 int count, dorandom, error; 353 u_short aux, first, last, lport; 354 #ifdef INET 355 struct in_addr laddr; 356 #endif 357 358 pcbinfo = inp->inp_pcbinfo; 359 360 /* 361 * Because no actual state changes occur here, a global write lock on 362 * the pcbinfo isn't required. 363 */ 364 INP_LOCK_ASSERT(inp); 365 INP_HASH_LOCK_ASSERT(pcbinfo); 366 367 if (inp->inp_flags & INP_HIGHPORT) { 368 first = V_ipport_hifirstauto; /* sysctl */ 369 last = V_ipport_hilastauto; 370 lastport = &pcbinfo->ipi_lasthi; 371 } else if (inp->inp_flags & INP_LOWPORT) { 372 error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0); 373 if (error) 374 return (error); 375 first = V_ipport_lowfirstauto; /* 1023 */ 376 last = V_ipport_lowlastauto; /* 600 */ 377 lastport = &pcbinfo->ipi_lastlow; 378 } else { 379 first = V_ipport_firstauto; /* sysctl */ 380 last = V_ipport_lastauto; 381 lastport = &pcbinfo->ipi_lastport; 382 } 383 /* 384 * For UDP, use random port allocation as long as the user 385 * allows it. 
For TCP (and as of yet unknown) connections, 386 * use random port allocation only if the user allows it AND 387 * ipport_tick() allows it. 388 */ 389 if (V_ipport_randomized && 390 (!V_ipport_stoprandom || pcbinfo == &V_udbinfo)) 391 dorandom = 1; 392 else 393 dorandom = 0; 394 /* 395 * It makes no sense to do random port allocation if 396 * we have the only port available. 397 */ 398 if (first == last) 399 dorandom = 0; 400 /* Make sure to not include UDP packets in the count. */ 401 if (pcbinfo != &V_udbinfo) 402 V_ipport_tcpallocs++; 403 /* 404 * Instead of having two loops further down counting up or down 405 * make sure that first is always <= last and go with only one 406 * code path implementing all logic. 407 */ 408 if (first > last) { 409 aux = first; 410 first = last; 411 last = aux; 412 } 413 414 #ifdef INET 415 /* Make the compiler happy. */ 416 laddr.s_addr = 0; 417 if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4) { 418 KASSERT(laddrp != NULL, ("%s: laddrp NULL for v4 inp %p", 419 __func__, inp)); 420 laddr = *laddrp; 421 } 422 #endif 423 tmpinp = NULL; /* Make compiler happy. */ 424 lport = *lportp; 425 426 if (dorandom) 427 *lastport = first + (arc4random() % (last - first)); 428 429 count = last - first; 430 431 do { 432 if (count-- < 0) /* completely used? */ 433 return (EADDRNOTAVAIL); 434 ++*lastport; 435 if (*lastport < first || *lastport > last) 436 *lastport = first; 437 lport = htons(*lastport); 438 439 #ifdef INET6 440 if ((inp->inp_vflag & INP_IPV6) != 0) 441 tmpinp = in6_pcblookup_local(pcbinfo, 442 &inp->in6p_laddr, lport, lookupflags, cred); 443 #endif 444 #if defined(INET) && defined(INET6) 445 else 446 #endif 447 #ifdef INET 448 tmpinp = in_pcblookup_local(pcbinfo, laddr, 449 lport, lookupflags, cred); 450 #endif 451 } while (tmpinp != NULL); 452 453 #ifdef INET 454 if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4) 455 laddrp->s_addr = laddr.s_addr; 456 #endif 457 *lportp = lport; 458 459 return (0); 460 } 461 #endif /* INET || INET6 */ 462 463 #ifdef INET 464 /* 465 * Set up a bind operation on a PCB, performing port allocation 466 * as required, but do not actually modify the PCB. Callers can 467 * either complete the bind by setting inp_laddr/inp_lport and 468 * calling in_pcbinshash(), or they can just use the resulting 469 * port and address to authorise the sending of a once-off packet. 470 * 471 * On error, the values of *laddrp and *lportp are not changed. 472 */ 473 int 474 in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp, 475 u_short *lportp, struct ucred *cred) 476 { 477 struct socket *so = inp->inp_socket; 478 struct sockaddr_in *sin; 479 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; 480 struct in_addr laddr; 481 u_short lport = 0; 482 int lookupflags = 0, reuseport = (so->so_options & SO_REUSEPORT); 483 int error; 484 485 /* 486 * No state changes, so read locks are sufficient here. 487 */ 488 INP_LOCK_ASSERT(inp); 489 INP_HASH_LOCK_ASSERT(pcbinfo); 490 491 if (TAILQ_EMPTY(&V_in_ifaddrhead)) /* XXX broken! 
*/ 492 return (EADDRNOTAVAIL); 493 laddr.s_addr = *laddrp; 494 if (nam != NULL && laddr.s_addr != INADDR_ANY) 495 return (EINVAL); 496 if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0) 497 lookupflags = INPLOOKUP_WILDCARD; 498 if (nam == NULL) { 499 if ((error = prison_local_ip4(cred, &laddr)) != 0) 500 return (error); 501 } else { 502 sin = (struct sockaddr_in *)nam; 503 if (nam->sa_len != sizeof (*sin)) 504 return (EINVAL); 505 #ifdef notdef 506 /* 507 * We should check the family, but old programs 508 * incorrectly fail to initialize it. 509 */ 510 if (sin->sin_family != AF_INET) 511 return (EAFNOSUPPORT); 512 #endif 513 error = prison_local_ip4(cred, &sin->sin_addr); 514 if (error) 515 return (error); 516 if (sin->sin_port != *lportp) { 517 /* Don't allow the port to change. */ 518 if (*lportp != 0) 519 return (EINVAL); 520 lport = sin->sin_port; 521 } 522 /* NB: lport is left as 0 if the port isn't being changed. */ 523 if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 524 /* 525 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast; 526 * allow complete duplication of binding if 527 * SO_REUSEPORT is set, or if SO_REUSEADDR is set 528 * and a multicast address is bound on both 529 * new and duplicated sockets. 530 */ 531 if (so->so_options & SO_REUSEADDR) 532 reuseport = SO_REUSEADDR|SO_REUSEPORT; 533 } else if (sin->sin_addr.s_addr != INADDR_ANY) { 534 sin->sin_port = 0; /* yech... */ 535 bzero(&sin->sin_zero, sizeof(sin->sin_zero)); 536 /* 537 * Is the address a local IP address? 538 * If INP_BINDANY is set, then the socket may be bound 539 * to any endpoint address, local or not. 540 */ 541 if ((inp->inp_flags & INP_BINDANY) == 0 && 542 ifa_ifwithaddr_check((struct sockaddr *)sin) == 0) 543 return (EADDRNOTAVAIL); 544 } 545 laddr = sin->sin_addr; 546 if (lport) { 547 struct inpcb *t; 548 struct tcptw *tw; 549 550 /* GROSS */ 551 if (ntohs(lport) <= V_ipport_reservedhigh && 552 ntohs(lport) >= V_ipport_reservedlow && 553 priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 554 0)) 555 return (EACCES); 556 if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) && 557 priv_check_cred(inp->inp_cred, 558 PRIV_NETINET_REUSEPORT, 0) != 0) { 559 t = in_pcblookup_local(pcbinfo, sin->sin_addr, 560 lport, INPLOOKUP_WILDCARD, cred); 561 /* 562 * XXX 563 * This entire block sorely needs a rewrite. 564 */ 565 if (t && 566 ((t->inp_flags & INP_TIMEWAIT) == 0) && 567 (so->so_type != SOCK_STREAM || 568 ntohl(t->inp_faddr.s_addr) == INADDR_ANY) && 569 (ntohl(sin->sin_addr.s_addr) != INADDR_ANY || 570 ntohl(t->inp_laddr.s_addr) != INADDR_ANY || 571 (t->inp_socket->so_options & 572 SO_REUSEPORT) == 0) && 573 (inp->inp_cred->cr_uid != 574 t->inp_cred->cr_uid)) 575 return (EADDRINUSE); 576 } 577 t = in_pcblookup_local(pcbinfo, sin->sin_addr, 578 lport, lookupflags, cred); 579 if (t && (t->inp_flags & INP_TIMEWAIT)) { 580 /* 581 * XXXRW: If an incpb has had its timewait 582 * state recycled, we treat the address as 583 * being in use (for now). This is better 584 * than a panic, but not desirable. 
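				 *
				 * The conflicting PCB's time-wait state
				 * records the socket options it was bound
				 * with; rebinding is permitted only when
				 * the requested SO_REUSEADDR/SO_REUSEPORT
				 * bits were also set on that earlier
				 * socket.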
				 */
				tw = intotw(t);
				if (tw == NULL ||
				    (reuseport & tw->tw_so_options) == 0)
					return (EADDRINUSE);
			} else if (t &&
			    (reuseport & t->inp_socket->so_options) == 0) {
#ifdef INET6
				if (ntohl(sin->sin_addr.s_addr) !=
				    INADDR_ANY ||
				    ntohl(t->inp_laddr.s_addr) !=
				    INADDR_ANY ||
				    INP_SOCKAF(so) ==
				    INP_SOCKAF(t->inp_socket))
#endif
					return (EADDRINUSE);
			}
		}
	}
	if (*lportp != 0)
		lport = *lportp;
	if (lport == 0) {
		error = in_pcb_lport(inp, &laddr, &lport, cred, lookupflags);
		if (error != 0)
			return (error);

	}
	*laddrp = laddr.s_addr;
	*lportp = lport;
	return (0);
}

/*
 * Connect from a socket to a specified address.
 * Both address and port must be specified in argument sin.
 * If we don't have a local address for this socket yet,
 * then pick one.
 */
int
in_pcbconnect_mbuf(struct inpcb *inp, struct sockaddr *nam,
    struct ucred *cred, struct mbuf *m)
{
	u_short lport, fport;
	in_addr_t laddr, faddr;
	int anonport, error;

	INP_WLOCK_ASSERT(inp);
	INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);

	lport = inp->inp_lport;
	laddr = inp->inp_laddr.s_addr;
	anonport = (lport == 0);
	error = in_pcbconnect_setup(inp, nam, &laddr, &lport, &faddr, &fport,
	    NULL, cred);
	if (error)
		return (error);

	/* Do the initial binding of the local address if required. */
	if (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0) {
		inp->inp_lport = lport;
		inp->inp_laddr.s_addr = laddr;
		if (in_pcbinshash(inp) != 0) {
			inp->inp_laddr.s_addr = INADDR_ANY;
			inp->inp_lport = 0;
			return (EAGAIN);
		}
	}

	/* Commit the remaining changes. */
	inp->inp_lport = lport;
	inp->inp_laddr.s_addr = laddr;
	inp->inp_faddr.s_addr = faddr;
	inp->inp_fport = fport;
	in_pcbrehash_mbuf(inp, m);

	if (anonport)
		inp->inp_flags |= INP_ANONPORT;
	return (0);
}

int
in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
{

	return (in_pcbconnect_mbuf(inp, nam, cred, NULL));
}

/*
 * Do proper source address selection on an unbound socket in case
 * of connect. Take jails into account as well.
 */
static int
in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_addr *laddr,
    struct ucred *cred)
{
	struct ifaddr *ifa;
	struct sockaddr *sa;
	struct sockaddr_in *sin;
	struct route sro;
	int error;

	KASSERT(laddr != NULL, ("%s: laddr NULL", __func__));

	/*
	 * Bypass source address selection and use the primary jail IP
	 * if requested.
	 */
	if (cred != NULL && !prison_saddrsel_ip4(cred, laddr))
		return (0);

	error = 0;
	bzero(&sro, sizeof(sro));

	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(struct sockaddr_in);
	sin->sin_addr.s_addr = faddr->s_addr;

	/*
	 * If route is known our src addr is taken from the i/f,
	 * else punt.
	 *
	 * Find out route to destination.
	 */
	if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0)
		in_rtalloc_ign(&sro, 0, inp->inp_inc.inc_fibnum);

	/*
	 * If we found a route, use the address corresponding to
	 * the outgoing interface.
	 *
	 * Otherwise assume faddr is reachable on a directly connected
	 * network and try to find a corresponding interface to take
	 * the source address from.
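	 *
	 * In outline, the cases handled below are: (1) no usable route, so
	 * probe the destination with ifa_ifwithdstaddr()/ifa_ifwithnet();
	 * (2) a route through a non-loopback interface, whose address is
	 * used; (3) a route through a loopback interface, in which case the
	 * interface owning the destination address is looked up instead.
	 * In every case a jailed socket is restricted to addresses
	 * belonging to the jail, falling back to the jail's primary
	 * address.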
719 */ 720 if (sro.ro_rt == NULL || sro.ro_rt->rt_ifp == NULL) { 721 struct in_ifaddr *ia; 722 struct ifnet *ifp; 723 724 ia = ifatoia(ifa_ifwithdstaddr((struct sockaddr *)sin)); 725 if (ia == NULL) 726 ia = ifatoia(ifa_ifwithnet((struct sockaddr *)sin, 0)); 727 if (ia == NULL) { 728 error = ENETUNREACH; 729 goto done; 730 } 731 732 if (cred == NULL || !prison_flag(cred, PR_IP4)) { 733 laddr->s_addr = ia->ia_addr.sin_addr.s_addr; 734 ifa_free(&ia->ia_ifa); 735 goto done; 736 } 737 738 ifp = ia->ia_ifp; 739 ifa_free(&ia->ia_ifa); 740 ia = NULL; 741 IF_ADDR_LOCK(ifp); 742 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 743 744 sa = ifa->ifa_addr; 745 if (sa->sa_family != AF_INET) 746 continue; 747 sin = (struct sockaddr_in *)sa; 748 if (prison_check_ip4(cred, &sin->sin_addr) == 0) { 749 ia = (struct in_ifaddr *)ifa; 750 break; 751 } 752 } 753 if (ia != NULL) { 754 laddr->s_addr = ia->ia_addr.sin_addr.s_addr; 755 IF_ADDR_UNLOCK(ifp); 756 goto done; 757 } 758 IF_ADDR_UNLOCK(ifp); 759 760 /* 3. As a last resort return the 'default' jail address. */ 761 error = prison_get_ip4(cred, laddr); 762 goto done; 763 } 764 765 /* 766 * If the outgoing interface on the route found is not 767 * a loopback interface, use the address from that interface. 768 * In case of jails do those three steps: 769 * 1. check if the interface address belongs to the jail. If so use it. 770 * 2. check if we have any address on the outgoing interface 771 * belonging to this jail. If so use it. 772 * 3. as a last resort return the 'default' jail address. 773 */ 774 if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) { 775 struct in_ifaddr *ia; 776 struct ifnet *ifp; 777 778 /* If not jailed, use the default returned. */ 779 if (cred == NULL || !prison_flag(cred, PR_IP4)) { 780 ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa; 781 laddr->s_addr = ia->ia_addr.sin_addr.s_addr; 782 goto done; 783 } 784 785 /* Jailed. */ 786 /* 1. Check if the iface address belongs to the jail. */ 787 sin = (struct sockaddr_in *)sro.ro_rt->rt_ifa->ifa_addr; 788 if (prison_check_ip4(cred, &sin->sin_addr) == 0) { 789 ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa; 790 laddr->s_addr = ia->ia_addr.sin_addr.s_addr; 791 goto done; 792 } 793 794 /* 795 * 2. Check if we have any address on the outgoing interface 796 * belonging to this jail. 797 */ 798 ia = NULL; 799 ifp = sro.ro_rt->rt_ifp; 800 IF_ADDR_LOCK(ifp); 801 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 802 sa = ifa->ifa_addr; 803 if (sa->sa_family != AF_INET) 804 continue; 805 sin = (struct sockaddr_in *)sa; 806 if (prison_check_ip4(cred, &sin->sin_addr) == 0) { 807 ia = (struct in_ifaddr *)ifa; 808 break; 809 } 810 } 811 if (ia != NULL) { 812 laddr->s_addr = ia->ia_addr.sin_addr.s_addr; 813 IF_ADDR_UNLOCK(ifp); 814 goto done; 815 } 816 IF_ADDR_UNLOCK(ifp); 817 818 /* 3. As a last resort return the 'default' jail address. */ 819 error = prison_get_ip4(cred, laddr); 820 goto done; 821 } 822 823 /* 824 * The outgoing interface is marked with 'loopback net', so a route 825 * to ourselves is here. 826 * Try to find the interface of the destination address and then 827 * take the address from there. That interface is not necessarily 828 * a loopback interface. 829 * In case of jails, check that it is an address of the jail 830 * and if we cannot find, fall back to the 'default' jail address. 
831 */ 832 if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) != 0) { 833 struct sockaddr_in sain; 834 struct in_ifaddr *ia; 835 836 bzero(&sain, sizeof(struct sockaddr_in)); 837 sain.sin_family = AF_INET; 838 sain.sin_len = sizeof(struct sockaddr_in); 839 sain.sin_addr.s_addr = faddr->s_addr; 840 841 ia = ifatoia(ifa_ifwithdstaddr(sintosa(&sain))); 842 if (ia == NULL) 843 ia = ifatoia(ifa_ifwithnet(sintosa(&sain), 0)); 844 if (ia == NULL) 845 ia = ifatoia(ifa_ifwithaddr(sintosa(&sain))); 846 847 if (cred == NULL || !prison_flag(cred, PR_IP4)) { 848 if (ia == NULL) { 849 error = ENETUNREACH; 850 goto done; 851 } 852 laddr->s_addr = ia->ia_addr.sin_addr.s_addr; 853 ifa_free(&ia->ia_ifa); 854 goto done; 855 } 856 857 /* Jailed. */ 858 if (ia != NULL) { 859 struct ifnet *ifp; 860 861 ifp = ia->ia_ifp; 862 ifa_free(&ia->ia_ifa); 863 ia = NULL; 864 IF_ADDR_LOCK(ifp); 865 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 866 867 sa = ifa->ifa_addr; 868 if (sa->sa_family != AF_INET) 869 continue; 870 sin = (struct sockaddr_in *)sa; 871 if (prison_check_ip4(cred, 872 &sin->sin_addr) == 0) { 873 ia = (struct in_ifaddr *)ifa; 874 break; 875 } 876 } 877 if (ia != NULL) { 878 laddr->s_addr = ia->ia_addr.sin_addr.s_addr; 879 IF_ADDR_UNLOCK(ifp); 880 goto done; 881 } 882 IF_ADDR_UNLOCK(ifp); 883 } 884 885 /* 3. As a last resort return the 'default' jail address. */ 886 error = prison_get_ip4(cred, laddr); 887 goto done; 888 } 889 890 done: 891 if (sro.ro_rt != NULL) 892 RTFREE(sro.ro_rt); 893 return (error); 894 } 895 896 /* 897 * Set up for a connect from a socket to the specified address. 898 * On entry, *laddrp and *lportp should contain the current local 899 * address and port for the PCB; these are updated to the values 900 * that should be placed in inp_laddr and inp_lport to complete 901 * the connect. 902 * 903 * On success, *faddrp and *fportp will be set to the remote address 904 * and port. These are not updated in the error case. 905 * 906 * If the operation fails because the connection already exists, 907 * *oinpp will be set to the PCB of that connection so that the 908 * caller can decide to override it. In all other cases, *oinpp 909 * is set to NULL. 910 */ 911 int 912 in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam, 913 in_addr_t *laddrp, u_short *lportp, in_addr_t *faddrp, u_short *fportp, 914 struct inpcb **oinpp, struct ucred *cred) 915 { 916 struct sockaddr_in *sin = (struct sockaddr_in *)nam; 917 struct in_ifaddr *ia; 918 struct inpcb *oinp; 919 struct in_addr laddr, faddr; 920 u_short lport, fport; 921 int error; 922 923 /* 924 * Because a global state change doesn't actually occur here, a read 925 * lock is sufficient. 926 */ 927 INP_LOCK_ASSERT(inp); 928 INP_HASH_LOCK_ASSERT(inp->inp_pcbinfo); 929 930 if (oinpp != NULL) 931 *oinpp = NULL; 932 if (nam->sa_len != sizeof (*sin)) 933 return (EINVAL); 934 if (sin->sin_family != AF_INET) 935 return (EAFNOSUPPORT); 936 if (sin->sin_port == 0) 937 return (EADDRNOTAVAIL); 938 laddr.s_addr = *laddrp; 939 lport = *lportp; 940 faddr = sin->sin_addr; 941 fport = sin->sin_port; 942 943 if (!TAILQ_EMPTY(&V_in_ifaddrhead)) { 944 /* 945 * If the destination address is INADDR_ANY, 946 * use the primary local address. 947 * If the supplied address is INADDR_BROADCAST, 948 * and the primary interface supports broadcast, 949 * choose the broadcast address for that interface. 
950 */ 951 if (faddr.s_addr == INADDR_ANY) { 952 IN_IFADDR_RLOCK(); 953 faddr = 954 IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr; 955 IN_IFADDR_RUNLOCK(); 956 if (cred != NULL && 957 (error = prison_get_ip4(cred, &faddr)) != 0) 958 return (error); 959 } else if (faddr.s_addr == (u_long)INADDR_BROADCAST) { 960 IN_IFADDR_RLOCK(); 961 if (TAILQ_FIRST(&V_in_ifaddrhead)->ia_ifp->if_flags & 962 IFF_BROADCAST) 963 faddr = satosin(&TAILQ_FIRST( 964 &V_in_ifaddrhead)->ia_broadaddr)->sin_addr; 965 IN_IFADDR_RUNLOCK(); 966 } 967 } 968 if (laddr.s_addr == INADDR_ANY) { 969 error = in_pcbladdr(inp, &faddr, &laddr, cred); 970 /* 971 * If the destination address is multicast and an outgoing 972 * interface has been set as a multicast option, prefer the 973 * address of that interface as our source address. 974 */ 975 if (IN_MULTICAST(ntohl(faddr.s_addr)) && 976 inp->inp_moptions != NULL) { 977 struct ip_moptions *imo; 978 struct ifnet *ifp; 979 980 imo = inp->inp_moptions; 981 if (imo->imo_multicast_ifp != NULL) { 982 ifp = imo->imo_multicast_ifp; 983 IN_IFADDR_RLOCK(); 984 TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) { 985 if ((ia->ia_ifp == ifp) && 986 (cred == NULL || 987 prison_check_ip4(cred, 988 &ia->ia_addr.sin_addr) == 0)) 989 break; 990 } 991 if (ia == NULL) 992 error = EADDRNOTAVAIL; 993 else { 994 laddr = ia->ia_addr.sin_addr; 995 error = 0; 996 } 997 IN_IFADDR_RUNLOCK(); 998 } 999 } 1000 if (error) 1001 return (error); 1002 } 1003 oinp = in_pcblookup_hash_locked(inp->inp_pcbinfo, faddr, fport, 1004 laddr, lport, 0, NULL); 1005 if (oinp != NULL) { 1006 if (oinpp != NULL) 1007 *oinpp = oinp; 1008 return (EADDRINUSE); 1009 } 1010 if (lport == 0) { 1011 error = in_pcbbind_setup(inp, NULL, &laddr.s_addr, &lport, 1012 cred); 1013 if (error) 1014 return (error); 1015 } 1016 *laddrp = laddr.s_addr; 1017 *lportp = lport; 1018 *faddrp = faddr.s_addr; 1019 *fportp = fport; 1020 return (0); 1021 } 1022 1023 void 1024 in_pcbdisconnect(struct inpcb *inp) 1025 { 1026 1027 INP_WLOCK_ASSERT(inp); 1028 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo); 1029 1030 inp->inp_faddr.s_addr = INADDR_ANY; 1031 inp->inp_fport = 0; 1032 in_pcbrehash(inp); 1033 } 1034 #endif 1035 1036 /* 1037 * in_pcbdetach() is responsibe for disassociating a socket from an inpcb. 1038 * For most protocols, this will be invoked immediately prior to calling 1039 * in_pcbfree(). However, with TCP the inpcb may significantly outlive the 1040 * socket, in which case in_pcbfree() is deferred. 1041 */ 1042 void 1043 in_pcbdetach(struct inpcb *inp) 1044 { 1045 1046 KASSERT(inp->inp_socket != NULL, ("%s: inp_socket == NULL", __func__)); 1047 1048 inp->inp_socket->so_pcb = NULL; 1049 inp->inp_socket = NULL; 1050 } 1051 1052 /* 1053 * in_pcbref() bumps the reference count on an inpcb in order to maintain 1054 * stability of an inpcb pointer despite the inpcb lock being released. This 1055 * is used in TCP when the inpcbinfo lock needs to be acquired or upgraded, 1056 * but where the inpcb lock is already held. 1057 * 1058 * in_pcbref() should be used only to provide brief memory stability, and 1059 * must always be followed by a call to INP_WLOCK() and in_pcbrele() to 1060 * garbage collect the inpcb if it has been in_pcbfree()'d from another 1061 * context. Until in_pcbrele() has returned that the inpcb is still valid, 1062 * lock and rele are the *only* safe operations that may be performed on the 1063 * inpcb. 
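 *
 * A minimal sketch of the expected pattern (illustrative only; the
 * pcbinfo lock shown is an assumption about the caller's context):
 *
 *	in_pcbref(inp);
 *	INP_WUNLOCK(inp);
 *	INP_INFO_WLOCK(pcbinfo);	(pcbinfo lock before inpcb lock)
 *	INP_WLOCK(inp);
 *	if (in_pcbrele(inp)) {
 *		INP_INFO_WUNLOCK(pcbinfo);
 *		return;			(the inpcb has been freed)
 *	}
 *	(revalidate any cached connection state before continuing)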
1064 * 1065 * While the inpcb will not be freed, releasing the inpcb lock means that the 1066 * connection's state may change, so the caller should be careful to 1067 * revalidate any cached state on reacquiring the lock. Drop the reference 1068 * using in_pcbrele(). 1069 */ 1070 void 1071 in_pcbref(struct inpcb *inp) 1072 { 1073 1074 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__)); 1075 1076 refcount_acquire(&inp->inp_refcount); 1077 } 1078 1079 /* 1080 * Drop a refcount on an inpcb elevated using in_pcbref(); because a call to 1081 * in_pcbfree() may have been made between in_pcbref() and in_pcbrele(), we 1082 * return a flag indicating whether or not the inpcb remains valid. If it is 1083 * valid, we return with the inpcb lock held. 1084 * 1085 * Notice that, unlike in_pcbref(), the inpcb lock must be held to drop a 1086 * reference on an inpcb. Historically more work was done here (actually, in 1087 * in_pcbfree_internal()) but has been moved to in_pcbfree() to avoid the 1088 * need for the pcbinfo lock in in_pcbrele(). Deferring the free is entirely 1089 * about memory stability (and continued use of the write lock). 1090 */ 1091 int 1092 in_pcbrele_rlocked(struct inpcb *inp) 1093 { 1094 struct inpcbinfo *pcbinfo; 1095 1096 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__)); 1097 1098 INP_RLOCK_ASSERT(inp); 1099 1100 if (refcount_release(&inp->inp_refcount) == 0) 1101 return (0); 1102 1103 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__)); 1104 1105 INP_RUNLOCK(inp); 1106 pcbinfo = inp->inp_pcbinfo; 1107 uma_zfree(pcbinfo->ipi_zone, inp); 1108 return (1); 1109 } 1110 1111 int 1112 in_pcbrele_wlocked(struct inpcb *inp) 1113 { 1114 struct inpcbinfo *pcbinfo; 1115 1116 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__)); 1117 1118 INP_WLOCK_ASSERT(inp); 1119 1120 if (refcount_release(&inp->inp_refcount) == 0) 1121 return (0); 1122 1123 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__)); 1124 1125 INP_WUNLOCK(inp); 1126 pcbinfo = inp->inp_pcbinfo; 1127 uma_zfree(pcbinfo->ipi_zone, inp); 1128 return (1); 1129 } 1130 1131 /* 1132 * Temporary wrapper. 1133 */ 1134 int 1135 in_pcbrele(struct inpcb *inp) 1136 { 1137 1138 return (in_pcbrele_wlocked(inp)); 1139 } 1140 1141 /* 1142 * Unconditionally schedule an inpcb to be freed by decrementing its 1143 * reference count, which should occur only after the inpcb has been detached 1144 * from its socket. If another thread holds a temporary reference (acquired 1145 * using in_pcbref()) then the free is deferred until that reference is 1146 * released using in_pcbrele(), but the inpcb is still unlocked. Almost all 1147 * work, including removal from global lists, is done in this context, where 1148 * the pcbinfo lock is held. 1149 */ 1150 void 1151 in_pcbfree(struct inpcb *inp) 1152 { 1153 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; 1154 1155 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__)); 1156 1157 INP_INFO_WLOCK_ASSERT(pcbinfo); 1158 INP_WLOCK_ASSERT(inp); 1159 1160 /* XXXRW: Do as much as possible here. 
*/ 1161 #ifdef IPSEC 1162 if (inp->inp_sp != NULL) 1163 ipsec_delete_pcbpolicy(inp); 1164 #endif /* IPSEC */ 1165 inp->inp_gencnt = ++pcbinfo->ipi_gencnt; 1166 in_pcbremlists(inp); 1167 #ifdef INET6 1168 if (inp->inp_vflag & INP_IPV6PROTO) { 1169 ip6_freepcbopts(inp->in6p_outputopts); 1170 if (inp->in6p_moptions != NULL) 1171 ip6_freemoptions(inp->in6p_moptions); 1172 } 1173 #endif 1174 if (inp->inp_options) 1175 (void)m_free(inp->inp_options); 1176 #ifdef INET 1177 if (inp->inp_moptions != NULL) 1178 inp_freemoptions(inp->inp_moptions); 1179 #endif 1180 inp->inp_vflag = 0; 1181 crfree(inp->inp_cred); 1182 #ifdef MAC 1183 mac_inpcb_destroy(inp); 1184 #endif 1185 if (!in_pcbrele_wlocked(inp)) 1186 INP_WUNLOCK(inp); 1187 } 1188 1189 /* 1190 * in_pcbdrop() removes an inpcb from hashed lists, releasing its address and 1191 * port reservation, and preventing it from being returned by inpcb lookups. 1192 * 1193 * It is used by TCP to mark an inpcb as unused and avoid future packet 1194 * delivery or event notification when a socket remains open but TCP has 1195 * closed. This might occur as a result of a shutdown()-initiated TCP close 1196 * or a RST on the wire, and allows the port binding to be reused while still 1197 * maintaining the invariant that so_pcb always points to a valid inpcb until 1198 * in_pcbdetach(). 1199 * 1200 * XXXRW: Possibly in_pcbdrop() should also prevent future notifications by 1201 * in_pcbnotifyall() and in_pcbpurgeif0()? 1202 */ 1203 void 1204 in_pcbdrop(struct inpcb *inp) 1205 { 1206 1207 INP_WLOCK_ASSERT(inp); 1208 1209 /* 1210 * XXXRW: Possibly we should protect the setting of INP_DROPPED with 1211 * the hash lock...? 1212 */ 1213 inp->inp_flags |= INP_DROPPED; 1214 if (inp->inp_flags & INP_INHASHLIST) { 1215 struct inpcbport *phd = inp->inp_phd; 1216 1217 INP_HASH_WLOCK(inp->inp_pcbinfo); 1218 LIST_REMOVE(inp, inp_hash); 1219 LIST_REMOVE(inp, inp_portlist); 1220 if (LIST_FIRST(&phd->phd_pcblist) == NULL) { 1221 LIST_REMOVE(phd, phd_hash); 1222 free(phd, M_PCB); 1223 } 1224 INP_HASH_WUNLOCK(inp->inp_pcbinfo); 1225 inp->inp_flags &= ~INP_INHASHLIST; 1226 } 1227 } 1228 1229 #ifdef INET 1230 /* 1231 * Common routines to return the socket addresses associated with inpcbs. 
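 *
 * in_sockaddr() allocates the returned sockaddr_in from M_SONAME with
 * M_WAITOK, so it cannot fail; ownership passes to the caller, which is
 * expected to free it when done.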
1232 */ 1233 struct sockaddr * 1234 in_sockaddr(in_port_t port, struct in_addr *addr_p) 1235 { 1236 struct sockaddr_in *sin; 1237 1238 sin = malloc(sizeof *sin, M_SONAME, 1239 M_WAITOK | M_ZERO); 1240 sin->sin_family = AF_INET; 1241 sin->sin_len = sizeof(*sin); 1242 sin->sin_addr = *addr_p; 1243 sin->sin_port = port; 1244 1245 return (struct sockaddr *)sin; 1246 } 1247 1248 int 1249 in_getsockaddr(struct socket *so, struct sockaddr **nam) 1250 { 1251 struct inpcb *inp; 1252 struct in_addr addr; 1253 in_port_t port; 1254 1255 inp = sotoinpcb(so); 1256 KASSERT(inp != NULL, ("in_getsockaddr: inp == NULL")); 1257 1258 INP_RLOCK(inp); 1259 port = inp->inp_lport; 1260 addr = inp->inp_laddr; 1261 INP_RUNLOCK(inp); 1262 1263 *nam = in_sockaddr(port, &addr); 1264 return 0; 1265 } 1266 1267 int 1268 in_getpeeraddr(struct socket *so, struct sockaddr **nam) 1269 { 1270 struct inpcb *inp; 1271 struct in_addr addr; 1272 in_port_t port; 1273 1274 inp = sotoinpcb(so); 1275 KASSERT(inp != NULL, ("in_getpeeraddr: inp == NULL")); 1276 1277 INP_RLOCK(inp); 1278 port = inp->inp_fport; 1279 addr = inp->inp_faddr; 1280 INP_RUNLOCK(inp); 1281 1282 *nam = in_sockaddr(port, &addr); 1283 return 0; 1284 } 1285 1286 void 1287 in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, int errno, 1288 struct inpcb *(*notify)(struct inpcb *, int)) 1289 { 1290 struct inpcb *inp, *inp_temp; 1291 1292 INP_INFO_WLOCK(pcbinfo); 1293 LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, inp_temp) { 1294 INP_WLOCK(inp); 1295 #ifdef INET6 1296 if ((inp->inp_vflag & INP_IPV4) == 0) { 1297 INP_WUNLOCK(inp); 1298 continue; 1299 } 1300 #endif 1301 if (inp->inp_faddr.s_addr != faddr.s_addr || 1302 inp->inp_socket == NULL) { 1303 INP_WUNLOCK(inp); 1304 continue; 1305 } 1306 if ((*notify)(inp, errno)) 1307 INP_WUNLOCK(inp); 1308 } 1309 INP_INFO_WUNLOCK(pcbinfo); 1310 } 1311 1312 void 1313 in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp) 1314 { 1315 struct inpcb *inp; 1316 struct ip_moptions *imo; 1317 int i, gap; 1318 1319 INP_INFO_RLOCK(pcbinfo); 1320 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { 1321 INP_WLOCK(inp); 1322 imo = inp->inp_moptions; 1323 if ((inp->inp_vflag & INP_IPV4) && 1324 imo != NULL) { 1325 /* 1326 * Unselect the outgoing interface if it is being 1327 * detached. 1328 */ 1329 if (imo->imo_multicast_ifp == ifp) 1330 imo->imo_multicast_ifp = NULL; 1331 1332 /* 1333 * Drop multicast group membership if we joined 1334 * through the interface being detached. 1335 */ 1336 for (i = 0, gap = 0; i < imo->imo_num_memberships; 1337 i++) { 1338 if (imo->imo_membership[i]->inm_ifp == ifp) { 1339 in_delmulti(imo->imo_membership[i]); 1340 gap++; 1341 } else if (gap != 0) 1342 imo->imo_membership[i - gap] = 1343 imo->imo_membership[i]; 1344 } 1345 imo->imo_num_memberships -= gap; 1346 } 1347 INP_WUNLOCK(inp); 1348 } 1349 INP_INFO_RUNLOCK(pcbinfo); 1350 } 1351 1352 /* 1353 * Lookup a PCB based on the local address and port. Caller must hold the 1354 * hash lock. No inpcb locks or references are acquired. 
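 *
 * Without INPLOOKUP_WILDCARD, only PCBs with an unconnected foreign
 * address that are bound to exactly the requested local address and
 * port are considered.  With INPLOOKUP_WILDCARD, a best-fit search of
 * the port hash is performed instead: each foreign or local address
 * that matches only by wildcard adds to a PCB's cost, IPv6 PCBs that
 * could match only via a mapped address are penalized by
 * INP_LOOKUP_MAPPED_PCB_COST, and the lowest-cost PCB is returned.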
1355 */ 1356 #define INP_LOOKUP_MAPPED_PCB_COST 3 1357 struct inpcb * 1358 in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, 1359 u_short lport, int lookupflags, struct ucred *cred) 1360 { 1361 struct inpcb *inp; 1362 #ifdef INET6 1363 int matchwild = 3 + INP_LOOKUP_MAPPED_PCB_COST; 1364 #else 1365 int matchwild = 3; 1366 #endif 1367 int wildcard; 1368 1369 KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0, 1370 ("%s: invalid lookup flags %d", __func__, lookupflags)); 1371 1372 INP_HASH_LOCK_ASSERT(pcbinfo); 1373 1374 if ((lookupflags & INPLOOKUP_WILDCARD) == 0) { 1375 struct inpcbhead *head; 1376 /* 1377 * Look for an unconnected (wildcard foreign addr) PCB that 1378 * matches the local address and port we're looking for. 1379 */ 1380 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 1381 0, pcbinfo->ipi_hashmask)]; 1382 LIST_FOREACH(inp, head, inp_hash) { 1383 #ifdef INET6 1384 /* XXX inp locking */ 1385 if ((inp->inp_vflag & INP_IPV4) == 0) 1386 continue; 1387 #endif 1388 if (inp->inp_faddr.s_addr == INADDR_ANY && 1389 inp->inp_laddr.s_addr == laddr.s_addr && 1390 inp->inp_lport == lport) { 1391 /* 1392 * Found? 1393 */ 1394 if (cred == NULL || 1395 prison_equal_ip4(cred->cr_prison, 1396 inp->inp_cred->cr_prison)) 1397 return (inp); 1398 } 1399 } 1400 /* 1401 * Not found. 1402 */ 1403 return (NULL); 1404 } else { 1405 struct inpcbporthead *porthash; 1406 struct inpcbport *phd; 1407 struct inpcb *match = NULL; 1408 /* 1409 * Best fit PCB lookup. 1410 * 1411 * First see if this local port is in use by looking on the 1412 * port hash list. 1413 */ 1414 porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport, 1415 pcbinfo->ipi_porthashmask)]; 1416 LIST_FOREACH(phd, porthash, phd_hash) { 1417 if (phd->phd_port == lport) 1418 break; 1419 } 1420 if (phd != NULL) { 1421 /* 1422 * Port is in use by one or more PCBs. Look for best 1423 * fit. 1424 */ 1425 LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) { 1426 wildcard = 0; 1427 if (cred != NULL && 1428 !prison_equal_ip4(inp->inp_cred->cr_prison, 1429 cred->cr_prison)) 1430 continue; 1431 #ifdef INET6 1432 /* XXX inp locking */ 1433 if ((inp->inp_vflag & INP_IPV4) == 0) 1434 continue; 1435 /* 1436 * We never select the PCB that has 1437 * INP_IPV6 flag and is bound to :: if 1438 * we have another PCB which is bound 1439 * to 0.0.0.0. If a PCB has the 1440 * INP_IPV6 flag, then we set its cost 1441 * higher than IPv4 only PCBs. 1442 * 1443 * Note that the case only happens 1444 * when a socket is bound to ::, under 1445 * the condition that the use of the 1446 * mapped address is allowed. 1447 */ 1448 if ((inp->inp_vflag & INP_IPV6) != 0) 1449 wildcard += INP_LOOKUP_MAPPED_PCB_COST; 1450 #endif 1451 if (inp->inp_faddr.s_addr != INADDR_ANY) 1452 wildcard++; 1453 if (inp->inp_laddr.s_addr != INADDR_ANY) { 1454 if (laddr.s_addr == INADDR_ANY) 1455 wildcard++; 1456 else if (inp->inp_laddr.s_addr != laddr.s_addr) 1457 continue; 1458 } else { 1459 if (laddr.s_addr != INADDR_ANY) 1460 wildcard++; 1461 } 1462 if (wildcard < matchwild) { 1463 match = inp; 1464 matchwild = wildcard; 1465 if (matchwild == 0) 1466 break; 1467 } 1468 } 1469 } 1470 return (match); 1471 } 1472 } 1473 #undef INP_LOOKUP_MAPPED_PCB_COST 1474 1475 /* 1476 * Lookup PCB in hash list, using pcbinfo tables. This variation assumes 1477 * that the caller has locked the hash list, and will not perform any further 1478 * locking or reference operations on either the hash list or the connection. 
1479 */ 1480 static struct inpcb * 1481 in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, struct in_addr faddr, 1482 u_int fport_arg, struct in_addr laddr, u_int lport_arg, int lookupflags, 1483 struct ifnet *ifp) 1484 { 1485 struct inpcbhead *head; 1486 struct inpcb *inp, *tmpinp; 1487 u_short fport = fport_arg, lport = lport_arg; 1488 1489 KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0, 1490 ("%s: invalid lookup flags %d", __func__, lookupflags)); 1491 1492 INP_HASH_LOCK_ASSERT(pcbinfo); 1493 1494 /* 1495 * First look for an exact match. 1496 */ 1497 tmpinp = NULL; 1498 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport, 1499 pcbinfo->ipi_hashmask)]; 1500 LIST_FOREACH(inp, head, inp_hash) { 1501 #ifdef INET6 1502 /* XXX inp locking */ 1503 if ((inp->inp_vflag & INP_IPV4) == 0) 1504 continue; 1505 #endif 1506 if (inp->inp_faddr.s_addr == faddr.s_addr && 1507 inp->inp_laddr.s_addr == laddr.s_addr && 1508 inp->inp_fport == fport && 1509 inp->inp_lport == lport) { 1510 /* 1511 * XXX We should be able to directly return 1512 * the inp here, without any checks. 1513 * Well unless both bound with SO_REUSEPORT? 1514 */ 1515 if (prison_flag(inp->inp_cred, PR_IP4)) 1516 return (inp); 1517 if (tmpinp == NULL) 1518 tmpinp = inp; 1519 } 1520 } 1521 if (tmpinp != NULL) 1522 return (tmpinp); 1523 1524 /* 1525 * Then look for a wildcard match, if requested. 1526 */ 1527 if ((lookupflags & INPLOOKUP_WILDCARD) != 0) { 1528 struct inpcb *local_wild = NULL, *local_exact = NULL; 1529 #ifdef INET6 1530 struct inpcb *local_wild_mapped = NULL; 1531 #endif 1532 struct inpcb *jail_wild = NULL; 1533 int injail; 1534 1535 /* 1536 * Order of socket selection - we always prefer jails. 1537 * 1. jailed, non-wild. 1538 * 2. jailed, wild. 1539 * 3. non-jailed, non-wild. 1540 * 4. non-jailed, wild. 1541 */ 1542 1543 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 1544 0, pcbinfo->ipi_hashmask)]; 1545 LIST_FOREACH(inp, head, inp_hash) { 1546 #ifdef INET6 1547 /* XXX inp locking */ 1548 if ((inp->inp_vflag & INP_IPV4) == 0) 1549 continue; 1550 #endif 1551 if (inp->inp_faddr.s_addr != INADDR_ANY || 1552 inp->inp_lport != lport) 1553 continue; 1554 1555 /* XXX inp locking */ 1556 if (ifp && ifp->if_type == IFT_FAITH && 1557 (inp->inp_flags & INP_FAITH) == 0) 1558 continue; 1559 1560 injail = prison_flag(inp->inp_cred, PR_IP4); 1561 if (injail) { 1562 if (prison_check_ip4(inp->inp_cred, 1563 &laddr) != 0) 1564 continue; 1565 } else { 1566 if (local_exact != NULL) 1567 continue; 1568 } 1569 1570 if (inp->inp_laddr.s_addr == laddr.s_addr) { 1571 if (injail) 1572 return (inp); 1573 else 1574 local_exact = inp; 1575 } else if (inp->inp_laddr.s_addr == INADDR_ANY) { 1576 #ifdef INET6 1577 /* XXX inp locking, NULL check */ 1578 if (inp->inp_vflag & INP_IPV6PROTO) 1579 local_wild_mapped = inp; 1580 else 1581 #endif /* INET6 */ 1582 if (injail) 1583 jail_wild = inp; 1584 else 1585 local_wild = inp; 1586 } 1587 } /* LIST_FOREACH */ 1588 if (jail_wild != NULL) 1589 return (jail_wild); 1590 if (local_exact != NULL) 1591 return (local_exact); 1592 if (local_wild != NULL) 1593 return (local_wild); 1594 #ifdef INET6 1595 if (local_wild_mapped != NULL) 1596 return (local_wild_mapped); 1597 #endif /* defined(INET6) */ 1598 } /* if ((lookupflags & INPLOOKUP_WILDCARD) != 0) */ 1599 1600 return (NULL); 1601 } 1602 1603 /* 1604 * Lookup PCB in hash list, using pcbinfo tables. This variation locks the 1605 * hash list lock, and will return the inpcb locked (i.e., requires 1606 * INPLOOKUP_LOCKPCB). 
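 *
 * A minimal caller sketch (illustrative, not from this file; the header
 * fields "ip"/"th" and the choice of V_tcbinfo are assumptions standing
 * in for a real receive path):
 *
 *	inp = in_pcblookup(&V_tcbinfo, ip->ip_src, th->th_sport,
 *	    ip->ip_dst, th->th_dport,
 *	    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, ifp);
 *	if (inp != NULL) {
 *		(inspect or modify the connection)
 *		INP_WUNLOCK(inp);
 *	}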
1607 */ 1608 static struct inpcb * 1609 in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, 1610 u_int fport, struct in_addr laddr, u_int lport, int lookupflags, 1611 struct ifnet *ifp) 1612 { 1613 struct inpcb *inp; 1614 1615 INP_HASH_RLOCK(pcbinfo); 1616 inp = in_pcblookup_hash_locked(pcbinfo, faddr, fport, laddr, lport, 1617 (lookupflags & ~(INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)), ifp); 1618 if (inp != NULL) { 1619 in_pcbref(inp); 1620 INP_HASH_RUNLOCK(pcbinfo); 1621 if (lookupflags & INPLOOKUP_WLOCKPCB) { 1622 INP_WLOCK(inp); 1623 if (in_pcbrele_wlocked(inp)) 1624 return (NULL); 1625 } else if (lookupflags & INPLOOKUP_RLOCKPCB) { 1626 INP_RLOCK(inp); 1627 if (in_pcbrele_rlocked(inp)) 1628 return (NULL); 1629 } else 1630 panic("%s: locking bug", __func__); 1631 } else 1632 INP_HASH_RUNLOCK(pcbinfo); 1633 return (inp); 1634 } 1635 1636 /* 1637 * Public inpcb lookup routines, accepting a 4-tuple, and optionally, an mbuf 1638 * from which a pre-calculated hash value may be extracted. 1639 */ 1640 struct inpcb * 1641 in_pcblookup(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport, 1642 struct in_addr laddr, u_int lport, int lookupflags, struct ifnet *ifp) 1643 { 1644 1645 KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0, 1646 ("%s: invalid lookup flags %d", __func__, lookupflags)); 1647 KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0, 1648 ("%s: LOCKPCB not set", __func__)); 1649 1650 return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport, 1651 lookupflags, ifp)); 1652 } 1653 1654 struct inpcb * 1655 in_pcblookup_mbuf(struct inpcbinfo *pcbinfo, struct in_addr faddr, 1656 u_int fport, struct in_addr laddr, u_int lport, int lookupflags, 1657 struct ifnet *ifp, struct mbuf *m) 1658 { 1659 1660 KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0, 1661 ("%s: invalid lookup flags %d", __func__, lookupflags)); 1662 KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0, 1663 ("%s: LOCKPCB not set", __func__)); 1664 1665 return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport, 1666 lookupflags, ifp)); 1667 } 1668 #endif /* INET */ 1669 1670 /* 1671 * Insert PCB onto various hash lists. 1672 */ 1673 int 1674 in_pcbinshash(struct inpcb *inp) 1675 { 1676 struct inpcbhead *pcbhash; 1677 struct inpcbporthead *pcbporthash; 1678 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; 1679 struct inpcbport *phd; 1680 u_int32_t hashkey_faddr; 1681 1682 INP_WLOCK_ASSERT(inp); 1683 INP_HASH_WLOCK_ASSERT(pcbinfo); 1684 1685 KASSERT((inp->inp_flags & INP_INHASHLIST) == 0, 1686 ("in_pcbinshash: INP_INHASHLIST")); 1687 1688 #ifdef INET6 1689 if (inp->inp_vflag & INP_IPV6) 1690 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; 1691 else 1692 #endif /* INET6 */ 1693 hashkey_faddr = inp->inp_faddr.s_addr; 1694 1695 pcbhash = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr, 1696 inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)]; 1697 1698 pcbporthash = &pcbinfo->ipi_porthashbase[ 1699 INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_porthashmask)]; 1700 1701 /* 1702 * Go through port list and look for a head for this lport. 1703 */ 1704 LIST_FOREACH(phd, pcbporthash, phd_hash) { 1705 if (phd->phd_port == inp->inp_lport) 1706 break; 1707 } 1708 /* 1709 * If none exists, malloc one and tack it on. 
1710 */ 1711 if (phd == NULL) { 1712 phd = malloc(sizeof(struct inpcbport), M_PCB, M_NOWAIT); 1713 if (phd == NULL) { 1714 return (ENOBUFS); /* XXX */ 1715 } 1716 phd->phd_port = inp->inp_lport; 1717 LIST_INIT(&phd->phd_pcblist); 1718 LIST_INSERT_HEAD(pcbporthash, phd, phd_hash); 1719 } 1720 inp->inp_phd = phd; 1721 LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist); 1722 LIST_INSERT_HEAD(pcbhash, inp, inp_hash); 1723 inp->inp_flags |= INP_INHASHLIST; 1724 return (0); 1725 } 1726 1727 /* 1728 * Move PCB to the proper hash bucket when { faddr, fport } have been 1729 * changed. NOTE: This does not handle the case of the lport changing (the 1730 * hashed port list would have to be updated as well), so the lport must 1731 * not change after in_pcbinshash() has been called. 1732 */ 1733 void 1734 in_pcbrehash_mbuf(struct inpcb *inp, struct mbuf *m) 1735 { 1736 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; 1737 struct inpcbhead *head; 1738 u_int32_t hashkey_faddr; 1739 1740 INP_WLOCK_ASSERT(inp); 1741 INP_HASH_WLOCK_ASSERT(pcbinfo); 1742 1743 KASSERT(inp->inp_flags & INP_INHASHLIST, 1744 ("in_pcbrehash: !INP_INHASHLIST")); 1745 1746 #ifdef INET6 1747 if (inp->inp_vflag & INP_IPV6) 1748 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; 1749 else 1750 #endif /* INET6 */ 1751 hashkey_faddr = inp->inp_faddr.s_addr; 1752 1753 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr, 1754 inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)]; 1755 1756 LIST_REMOVE(inp, inp_hash); 1757 LIST_INSERT_HEAD(head, inp, inp_hash); 1758 } 1759 1760 void 1761 in_pcbrehash(struct inpcb *inp) 1762 { 1763 1764 in_pcbrehash_mbuf(inp, NULL); 1765 } 1766 1767 /* 1768 * Remove PCB from various lists. 1769 */ 1770 static void 1771 in_pcbremlists(struct inpcb *inp) 1772 { 1773 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; 1774 1775 INP_INFO_WLOCK_ASSERT(pcbinfo); 1776 INP_WLOCK_ASSERT(inp); 1777 1778 inp->inp_gencnt = ++pcbinfo->ipi_gencnt; 1779 if (inp->inp_flags & INP_INHASHLIST) { 1780 struct inpcbport *phd = inp->inp_phd; 1781 1782 INP_HASH_WLOCK(pcbinfo); 1783 LIST_REMOVE(inp, inp_hash); 1784 LIST_REMOVE(inp, inp_portlist); 1785 if (LIST_FIRST(&phd->phd_pcblist) == NULL) { 1786 LIST_REMOVE(phd, phd_hash); 1787 free(phd, M_PCB); 1788 } 1789 INP_HASH_WUNLOCK(pcbinfo); 1790 inp->inp_flags &= ~INP_INHASHLIST; 1791 } 1792 LIST_REMOVE(inp, inp_list); 1793 pcbinfo->ipi_count--; 1794 } 1795 1796 /* 1797 * A set label operation has occurred at the socket layer, propagate the 1798 * label change into the in_pcb for the socket. 1799 */ 1800 void 1801 in_pcbsosetlabel(struct socket *so) 1802 { 1803 #ifdef MAC 1804 struct inpcb *inp; 1805 1806 inp = sotoinpcb(so); 1807 KASSERT(inp != NULL, ("in_pcbsosetlabel: so->so_pcb == NULL")); 1808 1809 INP_WLOCK(inp); 1810 SOCK_LOCK(so); 1811 mac_inpcb_sosetlabel(so, inp); 1812 SOCK_UNLOCK(so); 1813 INP_WUNLOCK(inp); 1814 #endif 1815 } 1816 1817 /* 1818 * ipport_tick runs once per second, determining if random port allocation 1819 * should be continued. If more than ipport_randomcps ports have been 1820 * allocated in the last second, then we return to sequential port 1821 * allocation. We return to random allocation only once we drop below 1822 * ipport_randomcps for at least ipport_randomtime seconds. 
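 *
 * For example, with the defaults above (ipport_randomcps = 10,
 * ipport_randomtime = 45): a second in which more than 10 non-UDP ports
 * are allocated sets ipport_stoprandom to 45, forcing sequential
 * allocation; every later second at or below the threshold decrements
 * it, so random allocation resumes only after 45 such seconds in a row.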
1823 */ 1824 static void 1825 ipport_tick(void *xtp) 1826 { 1827 VNET_ITERATOR_DECL(vnet_iter); 1828 1829 VNET_LIST_RLOCK_NOSLEEP(); 1830 VNET_FOREACH(vnet_iter) { 1831 CURVNET_SET(vnet_iter); /* XXX appease INVARIANTS here */ 1832 if (V_ipport_tcpallocs <= 1833 V_ipport_tcplastcount + V_ipport_randomcps) { 1834 if (V_ipport_stoprandom > 0) 1835 V_ipport_stoprandom--; 1836 } else 1837 V_ipport_stoprandom = V_ipport_randomtime; 1838 V_ipport_tcplastcount = V_ipport_tcpallocs; 1839 CURVNET_RESTORE(); 1840 } 1841 VNET_LIST_RUNLOCK_NOSLEEP(); 1842 callout_reset(&ipport_tick_callout, hz, ipport_tick, NULL); 1843 } 1844 1845 static void 1846 ip_fini(void *xtp) 1847 { 1848 1849 callout_stop(&ipport_tick_callout); 1850 } 1851 1852 /* 1853 * The ipport_callout should start running at about the time we attach the 1854 * inet or inet6 domains. 1855 */ 1856 static void 1857 ipport_tick_init(const void *unused __unused) 1858 { 1859 1860 /* Start ipport_tick. */ 1861 callout_init(&ipport_tick_callout, CALLOUT_MPSAFE); 1862 callout_reset(&ipport_tick_callout, 1, ipport_tick, NULL); 1863 EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL, 1864 SHUTDOWN_PRI_DEFAULT); 1865 } 1866 SYSINIT(ipport_tick_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, 1867 ipport_tick_init, NULL); 1868 1869 void 1870 inp_wlock(struct inpcb *inp) 1871 { 1872 1873 INP_WLOCK(inp); 1874 } 1875 1876 void 1877 inp_wunlock(struct inpcb *inp) 1878 { 1879 1880 INP_WUNLOCK(inp); 1881 } 1882 1883 void 1884 inp_rlock(struct inpcb *inp) 1885 { 1886 1887 INP_RLOCK(inp); 1888 } 1889 1890 void 1891 inp_runlock(struct inpcb *inp) 1892 { 1893 1894 INP_RUNLOCK(inp); 1895 } 1896 1897 #ifdef INVARIANTS 1898 void 1899 inp_lock_assert(struct inpcb *inp) 1900 { 1901 1902 INP_WLOCK_ASSERT(inp); 1903 } 1904 1905 void 1906 inp_unlock_assert(struct inpcb *inp) 1907 { 1908 1909 INP_UNLOCK_ASSERT(inp); 1910 } 1911 #endif 1912 1913 void 1914 inp_apply_all(void (*func)(struct inpcb *, void *), void *arg) 1915 { 1916 struct inpcb *inp; 1917 1918 INP_INFO_RLOCK(&V_tcbinfo); 1919 LIST_FOREACH(inp, V_tcbinfo.ipi_listhead, inp_list) { 1920 INP_WLOCK(inp); 1921 func(inp, arg); 1922 INP_WUNLOCK(inp); 1923 } 1924 INP_INFO_RUNLOCK(&V_tcbinfo); 1925 } 1926 1927 struct socket * 1928 inp_inpcbtosocket(struct inpcb *inp) 1929 { 1930 1931 INP_WLOCK_ASSERT(inp); 1932 return (inp->inp_socket); 1933 } 1934 1935 struct tcpcb * 1936 inp_inpcbtotcpcb(struct inpcb *inp) 1937 { 1938 1939 INP_WLOCK_ASSERT(inp); 1940 return ((struct tcpcb *)inp->inp_ppcb); 1941 } 1942 1943 int 1944 inp_ip_tos_get(const struct inpcb *inp) 1945 { 1946 1947 return (inp->inp_ip_tos); 1948 } 1949 1950 void 1951 inp_ip_tos_set(struct inpcb *inp, int val) 1952 { 1953 1954 inp->inp_ip_tos = val; 1955 } 1956 1957 void 1958 inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp, 1959 uint32_t *faddr, uint16_t *fp) 1960 { 1961 1962 INP_LOCK_ASSERT(inp); 1963 *laddr = inp->inp_laddr.s_addr; 1964 *faddr = inp->inp_faddr.s_addr; 1965 *lp = inp->inp_lport; 1966 *fp = inp->inp_fport; 1967 } 1968 1969 struct inpcb * 1970 so_sotoinpcb(struct socket *so) 1971 { 1972 1973 return (sotoinpcb(so)); 1974 } 1975 1976 struct tcpcb * 1977 so_sototcpcb(struct socket *so) 1978 { 1979 1980 return (sototcpcb(so)); 1981 } 1982 1983 #ifdef DDB 1984 static void 1985 db_print_indent(int indent) 1986 { 1987 int i; 1988 1989 for (i = 0; i < indent; i++) 1990 db_printf(" "); 1991 } 1992 1993 static void 1994 db_print_inconninfo(struct in_conninfo *inc, const char *name, int indent) 1995 { 1996 char faddr_str[48], laddr_str[48]; 
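	/* 48 bytes fits an IPv6 presentation address (INET6_ADDRSTRLEN is 46). */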
#ifdef DDB
static void
db_print_indent(int indent)
{
        int i;

        for (i = 0; i < indent; i++)
                db_printf(" ");
}

static void
db_print_inconninfo(struct in_conninfo *inc, const char *name, int indent)
{
        char faddr_str[48], laddr_str[48];

        db_print_indent(indent);
        db_printf("%s at %p\n", name, inc);

        indent += 2;

#ifdef INET6
        if (inc->inc_flags & INC_ISIPV6) {
                /* IPv6. */
                ip6_sprintf(laddr_str, &inc->inc6_laddr);
                ip6_sprintf(faddr_str, &inc->inc6_faddr);
        } else {
#endif
                /* IPv4. */
                inet_ntoa_r(inc->inc_laddr, laddr_str);
                inet_ntoa_r(inc->inc_faddr, faddr_str);
#ifdef INET6
        }
#endif
        db_print_indent(indent);
        db_printf("inc_laddr %s inc_lport %u\n", laddr_str,
            ntohs(inc->inc_lport));
        db_print_indent(indent);
        db_printf("inc_faddr %s inc_fport %u\n", faddr_str,
            ntohs(inc->inc_fport));
}

static void
db_print_inpflags(int inp_flags)
{
        int comma;

        comma = 0;
        if (inp_flags & INP_RECVOPTS) {
                db_printf("%sINP_RECVOPTS", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_RECVRETOPTS) {
                db_printf("%sINP_RECVRETOPTS", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_RECVDSTADDR) {
                db_printf("%sINP_RECVDSTADDR", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_HDRINCL) {
                db_printf("%sINP_HDRINCL", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_HIGHPORT) {
                db_printf("%sINP_HIGHPORT", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_LOWPORT) {
                db_printf("%sINP_LOWPORT", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_ANONPORT) {
                db_printf("%sINP_ANONPORT", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_RECVIF) {
                db_printf("%sINP_RECVIF", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_MTUDISC) {
                db_printf("%sINP_MTUDISC", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_FAITH) {
                db_printf("%sINP_FAITH", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_RECVTTL) {
                db_printf("%sINP_RECVTTL", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_DONTFRAG) {
                db_printf("%sINP_DONTFRAG", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_IPV6_V6ONLY) {
                db_printf("%sIN6P_IPV6_V6ONLY", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_PKTINFO) {
                db_printf("%sIN6P_PKTINFO", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_HOPLIMIT) {
                db_printf("%sIN6P_HOPLIMIT", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_HOPOPTS) {
                db_printf("%sIN6P_HOPOPTS", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_DSTOPTS) {
                db_printf("%sIN6P_DSTOPTS", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_RTHDR) {
                db_printf("%sIN6P_RTHDR", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_RTHDRDSTOPTS) {
                db_printf("%sIN6P_RTHDRDSTOPTS", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_TCLASS) {
                db_printf("%sIN6P_TCLASS", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_AUTOFLOWLABEL) {
                db_printf("%sIN6P_AUTOFLOWLABEL", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_TIMEWAIT) {
                db_printf("%sINP_TIMEWAIT", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_ONESBCAST) {
                db_printf("%sINP_ONESBCAST", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_DROPPED) {
                db_printf("%sINP_DROPPED", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & INP_SOCKREF) {
                db_printf("%sINP_SOCKREF", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_RFC2292) {
                db_printf("%sIN6P_RFC2292", comma ? ", " : "");
                comma = 1;
        }
        if (inp_flags & IN6P_MTU) {
                /* Keep the separator in front of the flag name, as above. */
                db_printf("%sIN6P_MTU", comma ? ", " : "");
                comma = 1;
        }
}

static void
db_print_inpvflag(u_char inp_vflag)
{
        int comma;

        comma = 0;
        if (inp_vflag & INP_IPV4) {
                db_printf("%sINP_IPV4", comma ? ", " : "");
                comma = 1;
        }
        if (inp_vflag & INP_IPV6) {
                db_printf("%sINP_IPV6", comma ? ", " : "");
                comma = 1;
        }
        if (inp_vflag & INP_IPV6PROTO) {
                db_printf("%sINP_IPV6PROTO", comma ? ", " : "");
                comma = 1;
        }
}

static void
db_print_inpcb(struct inpcb *inp, const char *name, int indent)
{

        db_print_indent(indent);
        db_printf("%s at %p\n", name, inp);

        indent += 2;

        db_print_indent(indent);
        db_printf("inp_flow: 0x%x\n", inp->inp_flow);

        db_print_inconninfo(&inp->inp_inc, "inp_conninfo", indent);

        db_print_indent(indent);
        db_printf("inp_ppcb: %p inp_pcbinfo: %p inp_socket: %p\n",
            inp->inp_ppcb, inp->inp_pcbinfo, inp->inp_socket);

        db_print_indent(indent);
        db_printf("inp_label: %p inp_flags: 0x%x (",
            inp->inp_label, inp->inp_flags);
        db_print_inpflags(inp->inp_flags);
        db_printf(")\n");

        db_print_indent(indent);
        db_printf("inp_sp: %p inp_vflag: 0x%x (", inp->inp_sp,
            inp->inp_vflag);
        db_print_inpvflag(inp->inp_vflag);
        db_printf(")\n");

        db_print_indent(indent);
        db_printf("inp_ip_ttl: %d inp_ip_p: %d inp_ip_minttl: %d\n",
            inp->inp_ip_ttl, inp->inp_ip_p, inp->inp_ip_minttl);

        db_print_indent(indent);
#ifdef INET6
        if (inp->inp_vflag & INP_IPV6) {
                db_printf("in6p_options: %p in6p_outputopts: %p "
                    "in6p_moptions: %p\n", inp->in6p_options,
                    inp->in6p_outputopts, inp->in6p_moptions);
                db_printf("in6p_icmp6filt: %p in6p_cksum %d "
                    "in6p_hops %u\n", inp->in6p_icmp6filt, inp->in6p_cksum,
                    inp->in6p_hops);
        } else
#endif
        {
                db_printf("inp_ip_tos: %d inp_ip_options: %p "
                    "inp_ip_moptions: %p\n", inp->inp_ip_tos,
                    inp->inp_options, inp->inp_moptions);
        }

        db_print_indent(indent);
        db_printf("inp_phd: %p inp_gencnt: %ju\n", inp->inp_phd,
            (uintmax_t)inp->inp_gencnt);
}

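/*
 * "show inpcb <addr>" entry point.  Example from the DDB prompt (the address
 * below is purely illustrative; substitute the address of a real inpcb):
 *
 *      db> show inpcb 0xfffff80003a7a000
 */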
", " : ""); 2124 comma = 1; 2125 } 2126 if (inp_flags & INP_SOCKREF) { 2127 db_printf("%sINP_SOCKREF", comma ? ", " : ""); 2128 comma = 1; 2129 } 2130 if (inp_flags & IN6P_RFC2292) { 2131 db_printf("%sIN6P_RFC2292", comma ? ", " : ""); 2132 comma = 1; 2133 } 2134 if (inp_flags & IN6P_MTU) { 2135 db_printf("IN6P_MTU%s", comma ? ", " : ""); 2136 comma = 1; 2137 } 2138 } 2139 2140 static void 2141 db_print_inpvflag(u_char inp_vflag) 2142 { 2143 int comma; 2144 2145 comma = 0; 2146 if (inp_vflag & INP_IPV4) { 2147 db_printf("%sINP_IPV4", comma ? ", " : ""); 2148 comma = 1; 2149 } 2150 if (inp_vflag & INP_IPV6) { 2151 db_printf("%sINP_IPV6", comma ? ", " : ""); 2152 comma = 1; 2153 } 2154 if (inp_vflag & INP_IPV6PROTO) { 2155 db_printf("%sINP_IPV6PROTO", comma ? ", " : ""); 2156 comma = 1; 2157 } 2158 } 2159 2160 static void 2161 db_print_inpcb(struct inpcb *inp, const char *name, int indent) 2162 { 2163 2164 db_print_indent(indent); 2165 db_printf("%s at %p\n", name, inp); 2166 2167 indent += 2; 2168 2169 db_print_indent(indent); 2170 db_printf("inp_flow: 0x%x\n", inp->inp_flow); 2171 2172 db_print_inconninfo(&inp->inp_inc, "inp_conninfo", indent); 2173 2174 db_print_indent(indent); 2175 db_printf("inp_ppcb: %p inp_pcbinfo: %p inp_socket: %p\n", 2176 inp->inp_ppcb, inp->inp_pcbinfo, inp->inp_socket); 2177 2178 db_print_indent(indent); 2179 db_printf("inp_label: %p inp_flags: 0x%x (", 2180 inp->inp_label, inp->inp_flags); 2181 db_print_inpflags(inp->inp_flags); 2182 db_printf(")\n"); 2183 2184 db_print_indent(indent); 2185 db_printf("inp_sp: %p inp_vflag: 0x%x (", inp->inp_sp, 2186 inp->inp_vflag); 2187 db_print_inpvflag(inp->inp_vflag); 2188 db_printf(")\n"); 2189 2190 db_print_indent(indent); 2191 db_printf("inp_ip_ttl: %d inp_ip_p: %d inp_ip_minttl: %d\n", 2192 inp->inp_ip_ttl, inp->inp_ip_p, inp->inp_ip_minttl); 2193 2194 db_print_indent(indent); 2195 #ifdef INET6 2196 if (inp->inp_vflag & INP_IPV6) { 2197 db_printf("in6p_options: %p in6p_outputopts: %p " 2198 "in6p_moptions: %p\n", inp->in6p_options, 2199 inp->in6p_outputopts, inp->in6p_moptions); 2200 db_printf("in6p_icmp6filt: %p in6p_cksum %d " 2201 "in6p_hops %u\n", inp->in6p_icmp6filt, inp->in6p_cksum, 2202 inp->in6p_hops); 2203 } else 2204 #endif 2205 { 2206 db_printf("inp_ip_tos: %d inp_ip_options: %p " 2207 "inp_ip_moptions: %p\n", inp->inp_ip_tos, 2208 inp->inp_options, inp->inp_moptions); 2209 } 2210 2211 db_print_indent(indent); 2212 db_printf("inp_phd: %p inp_gencnt: %ju\n", inp->inp_phd, 2213 (uintmax_t)inp->inp_gencnt); 2214 } 2215 2216 DB_SHOW_COMMAND(inpcb, db_show_inpcb) 2217 { 2218 struct inpcb *inp; 2219 2220 if (!have_addr) { 2221 db_printf("usage: show inpcb <addr>\n"); 2222 return; 2223 } 2224 inp = (struct inpcb *)addr; 2225 2226 db_print_inpcb(inp, "inpcb", 0); 2227 } 2228 #endif 2229