/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/refcount.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static VNET_DEFINE(uma_zone_t, tcptw_zone);
#define	V_tcptw_zone		VNET(tcptw_zone)
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * timewait lock, which must be held over queue iteration and modification.
 */
static VNET_DEFINE(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl		VNET(twq_2msl)

/* Global timewait lock */
static VNET_DEFINE(struct rwlock, tw_lock);
#define	V_tw_lock		VNET(tw_lock)

#define	TW_LOCK_INIT(tw, d)	rw_init_flags(&(tw), (d), 0)
#define	TW_LOCK_DESTROY(tw)	rw_destroy(&(tw))
#define	TW_RLOCK(tw)		rw_rlock(&(tw))
#define	TW_WLOCK(tw)		rw_wlock(&(tw))
#define	TW_RUNLOCK(tw)		rw_runlock(&(tw))
#define	TW_WUNLOCK(tw)		rw_wunlock(&(tw))
#define	TW_LOCK_ASSERT(tw)	rw_assert(&(tw), RA_LOCKED)
#define	TW_RLOCK_ASSERT(tw)	rw_assert(&(tw), RA_RLOCKED)
#define	TW_WLOCK_ASSERT(tw)	rw_assert(&(tw), RA_WLOCKED)
#define	TW_UNLOCK_ASSERT(tw)	rw_assert(&(tw), RA_UNLOCKED)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *, int);
static int	tcp_twrespond(struct tcptw *, int);

/*
 * tw_pcbref() bumps the reference count on a tw in order to maintain
 * stability of a tw pointer despite the tw lock being released.
 */
static void
tw_pcbref(struct tcptw *tw)
{

	KASSERT(tw->tw_refcount > 0, ("%s: refcount 0", __func__));
	refcount_acquire(&tw->tw_refcount);
}

/*
 * Drop a refcount on a tw elevated using tw_pcbref().
 */
static int
tw_pcbrele(struct tcptw *tw)
{

	KASSERT(tw->tw_refcount > 0, ("%s: refcount 0", __func__));
	if (!refcount_release(&tw->tw_refcount))
		return (0);
	uma_zfree(V_tcptw_zone, tw);
	return (1);
}
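
/*
 * Note on the reference protocol above: each tcptw is created with a
 * single reference (see refcount_init() in tcp_twstart()), and the final
 * tw_pcbrele() returns the structure to V_tcptw_zone.  A caller that
 * takes an extra reference with tw_pcbref() must therefore check the
 * return value of tw_pcbrele() before touching the tw again, as
 * tcp_tw_2msl_scan() does below.
 */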

static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}
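
/*
 * Illustrative example of the sizing above (the numbers are made up):
 * with an ephemeral range of 55536 ports and maxsockets = 25600, the
 * auto-sized limit is imin(imax(55536 / 2, 32), 25600 / 5) =
 * imin(27768, 5120) = 5120 TIME_WAIT entries.
 */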

static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE(int, nolocaltimewait) = 0;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");
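
/*
 * Example usage from userland (the values are illustrative, not tuning
 * advice):
 *
 *	sysctl net.inet.tcp.maxtcptw=16384	# cap the tcptw zone
 *	sysctl net.inet.tcp.nolocaltimewait=1	# skip tcptw for local peers
 *
 * maxtcptw may also be set as the loader tunable net.inet.tcp.maxtcptw,
 * which is fetched in tcp_tw_init() below.  A value of 0 selects the
 * tcptw_auto_size() default, and values below 32 are ignored by the
 * sysctl handler above.
 */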

void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
	TW_LOCK_INIT(V_tw_lock, "tcptw");
}

#ifdef VIMAGE
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;

	INP_INFO_WLOCK(&V_tcbinfo);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	INP_INFO_WUNLOCK(&V_tcbinfo);

	TW_LOCK_DESTROY(V_tw_lock);
	uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state.
 * tcbinfo is locked.
 * inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;
#ifdef INET6
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (V_nolocaltimewait) {
		int error = 0;
#ifdef INET6
		if (isipv6)
			error = in6_localaddr(&inp->in6p_faddr);
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET
			error = in_localip(inp->inp_faddr);
#endif
		if (error) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}

	tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_tw_2msl_reuse();
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;
	refcount_init(&tw->tw_refcount, 1);

	/*
	 * Recover last window size sent.
	 */
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
		tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
	else
		tw->last_win = 0;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever used for the FIN_WAIT_2 state as well,
	 * then we may need a ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_flags |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to __unused, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: The time wait state for the inpcb has been recycled, but
	 * the inpcb is still present.  This is undesirable, but temporarily
	 * necessary until we work out how to handle inpcbs whose timewait
	 * state has been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
	/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
drop:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}
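
/*
 * Tear down a TIME_WAIT connection: remove the tcptw from the 2MSL queue
 * and drop its inpcb.  When 'reuse' is non-zero the tcptw itself is not
 * released, so the caller (tcp_tw_2msl_reuse()) can recycle it for a new
 * connection.
 */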

void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* in_pcbfree() */
	INP_WLOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw, reuse);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: first, we own a
		 * strong reference, which we will now release, or we don't,
		 * in which case another reference exists (XXXRW: think
		 * about this more), and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else
		in_pcbfree(inp);
	TCPSTAT_INC(tcps_closed);
}
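
/*
 * Build and transmit a minimal TCP segment (normally a bare ACK) on
 * behalf of a connection that is in TIME_WAIT and therefore no longer
 * has a tcpcb.  Sequence numbers, the advertised window and timestamp
 * state all come from the compressed tcptw structure.
 */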

static int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
	hdrlen = 0;			/* Keep compiler happy */

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not an RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6,
		    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		ip->ip_len = htons(m->m_pkthdr.len);
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}

static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tw->tw_inpcb);

	TW_WLOCK(V_tw_lock);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
	TW_WUNLOCK(V_tw_lock);
}

static void
tcp_tw_2msl_stop(struct tcptw *tw, int reuse)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

	TW_WLOCK(V_tw_lock);
	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	TW_WUNLOCK(V_tw_lock);

	if (!reuse)
		tw_pcbrele(tw);
}

struct tcptw *
tcp_tw_2msl_reuse(void)
{
	struct tcptw *tw;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

	TW_WLOCK(V_tw_lock);
	tw = TAILQ_FIRST(&V_twq_2msl);
	if (tw == NULL) {
		TW_WUNLOCK(V_tw_lock);
		return (NULL);
	}
	TW_WUNLOCK(V_tw_lock);

	INP_WLOCK(tw->tw_inpcb);
	tcp_twclose(tw, 1);

	return (tw);
}
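
/*
 * Drain the head of the 2MSL queue of entries whose timers have expired.
 * The read lock only keeps the queue head stable, so a reference is
 * taken on each candidate before the lock is dropped; if the tcbinfo
 * write lock cannot be acquired without blocking, the reference is
 * dropped and the scan stops, leaving the entry for a later pass.
 */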

void
tcp_tw_2msl_scan(void)
{
	struct tcptw *tw;

	for (;;) {
		TW_RLOCK(V_tw_lock);
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || tw->tw_time - ticks > 0) {
			TW_RUNLOCK(V_tw_lock);
			break;
		}
		tw_pcbref(tw);
		TW_RUNLOCK(V_tw_lock);

		/* Close timewait state */
		if (INP_INFO_TRY_WLOCK(&V_tcbinfo)) {
			if (tw_pcbrele(tw)) {
				INP_INFO_WUNLOCK(&V_tcbinfo);
				continue;
			}

			KASSERT(tw->tw_inpcb != NULL,
			    ("%s: tw->tw_inpcb == NULL", __func__));
			INP_WLOCK(tw->tw_inpcb);
			tcp_twclose(tw, 0);
			INP_INFO_WUNLOCK(&V_tcbinfo);
		} else {
			/* INP_INFO lock is busy; continue later. */
			tw_pcbrele(tw);
			break;
		}
	}
}