/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifndef INVARIANTS
#include <sys/syslog.h>
#endif
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE_STATIC(uma_zone_t, tcptw_zone);
#define	V_tcptw_zone		VNET(tcptw_zone)
static int	maxtcptw;
/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * timewait lock, which must be held over queue iteration and modification.
 *
 * Rules on tcptw usage:
 *  - an inpcb is always freed _after_ its tcptw
 *  - a tcptw relies on its inpcb reference counting for memory stability
 *  - a tcptw is dereferenceable only while its inpcb is locked
 */
VNET_DEFINE_STATIC(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl		VNET(twq_2msl)

/* Global timewait lock */
VNET_DEFINE_STATIC(struct rwlock, tw_lock);
#define	V_tw_lock		VNET(tw_lock)

#define	TW_LOCK_INIT(tw, d)	rw_init_flags(&(tw), (d), 0)
#define	TW_LOCK_DESTROY(tw)	rw_destroy(&(tw))
#define	TW_RLOCK(tw)		rw_rlock(&(tw))
#define	TW_WLOCK(tw)		rw_wlock(&(tw))
#define	TW_RUNLOCK(tw)		rw_runlock(&(tw))
#define	TW_WUNLOCK(tw)		rw_wunlock(&(tw))
#define	TW_LOCK_ASSERT(tw)	rw_assert(&(tw), RA_LOCKED)
#define	TW_RLOCK_ASSERT(tw)	rw_assert(&(tw), RA_RLOCKED)
#define	TW_WLOCK_ASSERT(tw)	rw_assert(&(tw), RA_WLOCKED)
#define	TW_UNLOCK_ASSERT(tw)	rw_assert(&(tw), RA_UNLOCKED)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *, int);
static int	tcp_twrespond(struct tcptw *, int);

static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}
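/*
 * For illustration: with an ephemeral port range of, say, 10000..65535,
 * halfrange is (65535 - 10000) / 2 = 27767, so the zone limit works out to
 * imin(imax(27767, 32), maxsockets / 5); the maxsockets / 5 term is the
 * effective cap whenever maxsockets is below roughly 139000.
 */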
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE_STATIC(int, nolocaltimewait) = 0;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
	TW_LOCK_INIT(V_tw_lock, "tcptw");
}

#ifdef VIMAGE
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;
	struct epoch_tracker et;

	INP_INFO_RLOCK_ET(&V_tcbinfo, et);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);

	TW_LOCK_DESTROY(V_tw_lock);
	uma_zdestroy(V_tcptw_zone);
}
#endif
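/*
 * Note: when the net.inet.tcp.nolocaltimewait sysctl is non-zero and the
 * peer address is local, tcp_twstart() below uses a throw-away on-stack
 * tcptw (nothing is allocated from the zone), sends the final ACK if one
 * is pending, and drops the inpcb right away; no compressed TIME_WAIT
 * entry is queued and no 2MSL timer is scheduled for that connection.
 */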
/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw twlocal, *tw;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;
	uint32_t recwin;
	bool acknow, local;
#ifdef INET6
	bool isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/* A dropped inp should never transition to TIME_WAIT state. */
	KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("tcp_twstart: "
	    "(inp->inp_flags & INP_DROPPED) != 0"));

	if (V_nolocaltimewait) {
#ifdef INET6
		if (isipv6)
			local = in6_localaddr(&inp->in6p_faddr);
		else
#endif
#ifdef INET
			local = in_localip(inp->inp_faddr);
#else
			local = false;
#endif
	} else
		local = false;

	/*
	 * For use only by DTrace.  We do not reference the state
	 * after this point so modifying it in place is not a problem.
	 */
	tcp_state_change(tp, TCPS_TIME_WAIT);

	if (local)
		tw = &twlocal;
	else
		tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		/*
		 * Reached limit on total number of TIMEWAIT connections
		 * allowed.  Remove a connection from TIMEWAIT queue in LRU
		 * fashion to make room for this connection.
		 *
		 * XXX:  Check if it is possible to always have enough room
		 * in advance based on guarantees provided by uma_zalloc().
		 */
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	/*
	 * For the !local case the tcptw will hold a reference on its inpcb
	 * until tcp_twclose is called.
	 */
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	so = inp->inp_socket;
	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);
	if (recwin < (so->so_rcv.sb_hiwat / 4) &&
	    recwin < tp->t_maxseg)
		recwin = 0;
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
	    recwin < (tp->rcv_adv - tp->rcv_nxt))
		recwin = (tp->rcv_adv - tp->rcv_nxt);
	tw->last_win = (u_short)(recwin >> tp->rcv_scale);

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code will be used for the FIN_WAIT_2 state also,
	 * then we may need a ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	soisdisconnected(so);
	tw->tw_so_options = so->so_options;
	inp->inp_flags |= INP_TIMEWAIT;
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	if (local)
		in_pcbdrop(inp);
	else {
		in_pcbref(inp);	/* Reference from tw */
		tw->tw_cred = crhold(so->so_cred);
		inp->inp_ppcb = tw;
		TCPSTATES_INC(TCPS_TIME_WAIT);
		tcp_tw_2msl_reset(tw, 0);
	}

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}
/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to __unused, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcbs whose timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt) {
		TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
		tcp_twrespond(tw, TH_ACK);
		goto dropnoprobe;
	}
drop:
	TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
dropnoprobe:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}

void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);	/* in_pcbfree() */
	INP_WLOCK_ASSERT(inp);

	tcp_tw_2msl_stop(tw, reuse);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: first, we own a
		 * strong reference, which we will now release, or we don't,
		 * in which case another reference exists (XXXRW: think
		 * about this more), and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else {
		/*
		 * The socket has already been cleaned up for us, only free
		 * the inpcb.
		 */
		in_pcbfree(inp);
	}
	TCPSTAT_INC(tcps_closed);
}
static int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
	hdrlen = 0;			/* Keep compiler happy */

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6,
		    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		ip->ip_len = htons(m->m_pkthdr.len);
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
		TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}
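/*
 * Note on the 2MSL timer: tw_time, set below, is an absolute tick value
 * (ticks + 2 * tcp_msl).  tcp_tw_2msl_scan() treats an entry as expired
 * once the signed difference (tw->tw_time - ticks) is no longer positive,
 * which keeps the comparison valid across tick-counter wraparound.
 */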
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tw->tw_inpcb);

	TW_WLOCK(V_tw_lock);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
	TW_WUNLOCK(V_tw_lock);
}

static void
tcp_tw_2msl_stop(struct tcptw *tw, int reuse)
{
	struct ucred *cred;
	struct inpcb *inp;
	int released __unused;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	TW_WLOCK(V_tw_lock);
	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;

	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	cred = tw->tw_cred;
	tw->tw_cred = NULL;
	TW_WUNLOCK(V_tw_lock);

	if (cred != NULL)
		crfree(cred);

	released = in_pcbrele_wlocked(inp);
	KASSERT(!released, ("%s: inp should not be released here", __func__));

	if (!reuse)
		uma_zfree(V_tcptw_zone, tw);
	TCPSTATES_DEC(TCPS_TIME_WAIT);
}

struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;
	struct inpcb *inp;
	struct epoch_tracker et;

#ifdef INVARIANTS
	if (reuse) {
		/*
		 * The exclusive pcbinfo lock is not required in the reuse
		 * case, even though two inpcb locks can be acquired
		 * simultaneously:
		 * - the inpcb transitioning to TIME_WAIT state in
		 *   tcp_twstart(),
		 * - the inpcb closed by tcp_twclose().
		 *
		 * This is because only inpcbs in the FIN_WAIT2 or CLOSING
		 * states can transition to TIME_WAIT state.  Hence an inpcb
		 * cannot be on the TIME_WAIT list and transitioning to
		 * TIME_WAIT state at the same time.
		 */
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	for (;;) {
		TW_RLOCK(V_tw_lock);
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0)) {
			TW_RUNLOCK(V_tw_lock);
			break;
		}
		KASSERT(tw->tw_inpcb != NULL, ("%s: tw->tw_inpcb == NULL",
		    __func__));

		inp = tw->tw_inpcb;
		in_pcbref(inp);
		TW_RUNLOCK(V_tw_lock);

		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		INP_WLOCK(inp);
		tw = intotw(inp);
		if (in_pcbrele_wlocked(inp)) {
			if (__predict_true(tw == NULL)) {
				INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
				continue;
			} else {
				/*
				 * This should not happen, as in TIMEWAIT
				 * state the inp should not be destroyed
				 * before its tcptw.  If INVARIANTS is
				 * defined, panic.
				 */
#ifdef INVARIANTS
				panic("%s: Panic before an infinite "
				    "loop: INP_TIMEWAIT && (INP_FREED "
				    "|| inp last reference) && tw != "
				    "NULL", __func__);
#else
				log(LOG_ERR, "%s: Avoid an infinite "
				    "loop: INP_TIMEWAIT && (INP_FREED "
				    "|| inp last reference) && tw != "
				    "NULL", __func__);
#endif
				INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
				break;
			}
		}

		if (tw == NULL) {
			/* tcp_twclose() has already been called */
			INP_WUNLOCK(inp);
			INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
			continue;
		}

		tcp_twclose(tw, reuse);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		if (reuse)
			return tw;
	}

	return NULL;
}