/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
#include <sys/vimage.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>
#include <netinet/vinet.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static uma_zone_t tcptw_zone;
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including those
 * in each tcptw structure, are protected by the global tcbinfo lock,
 * which must be held over queue iteration and modification.
 */
#ifdef VIMAGE_GLOBALS
static TAILQ_HEAD(, tcptw)	twq_2msl;
int	nolocaltimewait;
#endif

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *);

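/*
 * Derive a default limit on compressed TIME_WAIT entries: half the
 * ephemeral port range, clamped below by 32 and above by maxsockets / 5.
 */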
static int
tcptw_auto_size(void)
{
	INIT_VNET_INET(curvnet);
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

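/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the effective limit
 * (the auto-sized value when maxtcptw is 0) and, on writes, accepts values
 * of at least 32 and applies the new limit to the tcptw zone.
 */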
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, nolocaltimewait,
    CTLFLAG_RW, nolocaltimewait, 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

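/*
 * Re-derive the tcptw zone limit when the auto-sizing inputs (such as
 * maxsockets) may have changed, unless a fixed maxtcptw was configured.
 */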
void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
}

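/*
 * Initialize the compressed TIME_WAIT subsystem: create the tcptw zone,
 * apply the tunable or auto-sized limit, and set up the 2MSL queue.
 */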
void
tcp_tw_init(void)
{
	INIT_VNET_INET(curvnet);

	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
}

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
	INIT_VNET_INET(tp->t_vnet);
#endif
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_reset(). */
	INP_WLOCK_ASSERT(inp);

	if (V_nolocaltimewait && in_localip(inp->inp_faddr)) {
		tp = tcp_close(tp);
		if (tp != NULL)
			INP_WUNLOCK(inp);
		return;
	}

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss     = tp->iss;
	tw->irs     = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code will be used for fin-wait-2 state also, then
	 * we may need a ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_vflag & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_vflag &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

#if 0
/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	INIT_VNET_INET(curvnet);
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
#endif

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
	INIT_VNET_INET(curvnet);
#endif
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;
#ifdef INET6
	int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#else
	const int isipv6 = 0;
#endif

	/* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcbs whose timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *      RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
	goto drop;

	/*
	 * Generate a RST, dropping incoming segment.
	 * Make ACK acceptable to originator of segment.
	 * Don't bother to respond if destination was broadcast/multicast.
	 */
	if (m->m_flags & (M_BCAST|M_MCAST))
		goto drop;
	if (isipv6) {
#ifdef INET6
		struct ip6_hdr *ip6;

		/* IPv6 anycast check is done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
		    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
			goto drop;
#endif
	} else {
		struct ip *ip;

		ip = mtod(m, struct ip *);
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
		    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
			goto drop;
	}
	if (thflags & TH_ACK) {
		tcp_respond(NULL,
		    mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
	} else {
		seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
		tcp_respond(NULL,
		    mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);
	}
	INP_WUNLOCK(inp);
	return (0);

drop:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}

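/*
 * Tear down TIME_WAIT state: unhook the tcptw from its inpcb, stop the
 * 2MSL timer and drop the inpcb, releasing the socket if we hold the sole
 * reference to it.  With "reuse" set, the tcptw itself is not freed so
 * that the caller may recycle it.
 */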
void
tcp_twclose(struct tcptw *tw, int reuse)
{
	INIT_VNET_INET(curvnet);
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_vflag & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_stop(). */
	INP_WLOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: either we own a
		 * strong reference, which we now release, or we don't, in
		 * which case another reference exists (XXXRW: think about
		 * this more) and we don't need to take action.
		 */
		if (inp->inp_vflag & INP_SOCKREF) {
			inp->inp_vflag &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else
		in_pcbfree(inp);
	V_tcpstat.tcps_closed++;
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(tcptw_zone, tw);
}

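/*
 * Build and transmit a control segment (with the flags requested by the
 * caller, normally TH_ACK) for a connection in TIME_WAIT, echoing a
 * timestamp when one was negotiated and this is not an RST.
 */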
int
tcp_twrespond(struct tcptw *tw, int flags)
{
	INIT_VNET_INET(curvnet);
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int hdrlen, optlen;
	int error;
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not an RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = ticks + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		if (V_path_mtu_discovery)
			ip->ip_off |= IP_DF;
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
	if (flags & TH_ACK)
		V_tcpstat.tcps_sndacks++;
	else
		V_tcpstat.tcps_sndctrl++;
	V_tcpstat.tcps_sndtotal++;
	return (error);
}

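/*
 * (Re)start the 2MSL timer for a tcptw: stamp its expiry at now plus
 * 2 * MSL and move it to the tail of the queue, first removing it if it
 * was already queued ("rearm").
 */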
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{
	INIT_VNET_INET(curvnet);

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tw->tw_inpcb);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
}

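/*
 * Remove a tcptw from the 2MSL queue when its timewait state is torn down.
 */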
static void
tcp_tw_2msl_stop(struct tcptw *tw)
{
	INIT_VNET_INET(curvnet);

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
}

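/*
 * Reap expired entries from the head of the 2MSL queue.  With "reuse" set,
 * close the first entry unconditionally and hand its tcptw back to the
 * caller for recycling; otherwise return NULL once no expired entries
 * remain.
 */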
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	INIT_VNET_INET(curvnet);
	struct tcptw *tw;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	for (;;) {
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && tw->tw_time > ticks))
			break;
		INP_WLOCK(tw->tw_inpcb);
		tcp_twclose(tw, reuse);
		if (reuse)
			return (tw);
	}
	return (NULL);
}