xref: /freebsd/sys/netinet/tcp_timewait.c (revision d65cd7a57bf0600b722afc770838a5d0c1c3a8e1)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifndef INVARIANTS
#include <sys/syslog.h>
#endif
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE_STATIC(uma_zone_t, tcptw_zone);
#define	V_tcptw_zone		VNET(tcptw_zone)
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * timewait lock, which must be held over queue iteration and modification.
 *
 * Rules on tcptw usage:
 *  - an inpcb is always freed _after_ its tcptw
 *  - a tcptw relies on its inpcb reference counting for memory stability
 *  - a tcptw is dereferenceable only while its inpcb is locked
 */
VNET_DEFINE_STATIC(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl		VNET(twq_2msl)

/* Global timewait lock */
VNET_DEFINE_STATIC(struct rwlock, tw_lock);
#define	V_tw_lock		VNET(tw_lock)

#define	TW_LOCK_INIT(tw, d)	rw_init_flags(&(tw), (d), 0)
#define	TW_LOCK_DESTROY(tw)	rw_destroy(&(tw))
#define	TW_RLOCK(tw)		rw_rlock(&(tw))
#define	TW_WLOCK(tw)		rw_wlock(&(tw))
#define	TW_RUNLOCK(tw)		rw_runlock(&(tw))
#define	TW_WUNLOCK(tw)		rw_wunlock(&(tw))
#define	TW_LOCK_ASSERT(tw)	rw_assert(&(tw), RA_LOCKED)
#define	TW_RLOCK_ASSERT(tw)	rw_assert(&(tw), RA_RLOCKED)
#define	TW_WLOCK_ASSERT(tw)	rw_assert(&(tw), RA_WLOCKED)
#define	TW_UNLOCK_ASSERT(tw)	rw_assert(&(tw), RA_UNLOCKED)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *, int);
static int	tcp_twrespond(struct tcptw *, int);

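/*
 * Default the cap on TIME_WAIT entries to half of the ephemeral port
 * range, clamped below by 32 and above by maxsockets / 5.
 */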
static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

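/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the current limit
 * (or the auto-sized default while maxtcptw is 0) and accepts new values
 * of at least 32, resizing the tcptw UMA zone accordingly.
 */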
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");
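
/*
 * For example, the limit can be tuned at boot via loader.conf or at
 * runtime with sysctl(8); the value 163840 below is only illustrative:
 *
 *	net.inet.tcp.maxtcptw="163840"		(in /boot/loader.conf)
 *	sysctl net.inet.tcp.maxtcptw=163840
 *
 * Leaving maxtcptw at 0 (the default) keeps the auto-sized limit from
 * tcptw_auto_size().
 */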

VNET_DEFINE_STATIC(int, nolocaltimewait) = 0;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

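/*
 * Re-apply the auto-sized zone limit; tcptw_auto_size() depends on
 * maxsockets, so this is needed when that value changes.  A non-zero
 * maxtcptw set by the administrator is left alone.
 */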
void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

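/*
 * Per-VNET initialization: create the tcptw UMA zone, apply the
 * configured (or auto-sized) limit, and set up the 2MSL queue and its
 * lock.
 */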
void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
	TW_LOCK_INIT(V_tw_lock, "tcptw");
}

#ifdef VIMAGE
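/*
 * Per-VNET teardown: close any remaining TIME_WAIT connections, then
 * destroy the timewait lock and the tcptw zone.
 */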
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	NET_EPOCH_EXIT(et);

	TW_LOCK_DESTROY(V_tw_lock);
	uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw twlocal, *tw;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;
	uint32_t recwin;
	bool acknow, local;
#ifdef INET6
	bool isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

	/* A dropped inp should never transition to TIME_WAIT state. */
	KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("tcp_twstart: "
	    "(inp->inp_flags & INP_DROPPED) != 0"));

	if (V_nolocaltimewait) {
#ifdef INET6
		if (isipv6)
			local = in6_localaddr(&inp->in6p_faddr);
		else
#endif
#ifdef INET
			local = in_localip(inp->inp_faddr);
#else
			local = false;
#endif
	} else
		local = false;

	/*
	 * For use only by DTrace.  We do not reference the state
	 * after this point so modifying it in place is not a problem.
	 */
	tcp_state_change(tp, TCPS_TIME_WAIT);

	if (local)
		tw = &twlocal;
	else
		tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		/*
		 * Reached limit on total number of TIMEWAIT connections
		 * allowed. Remove a connection from TIMEWAIT queue in LRU
		 * fashion to make room for this connection.
		 *
		 * XXX:  Check if it is possible to always have enough room
		 * in advance based on guarantees provided by uma_zalloc().
		 */
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	/*
	 * In the !local case the tcptw will hold a reference on its inpcb
	 * until tcp_twclose is called.
	 */
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	so = inp->inp_socket;
	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);
	if (recwin < (so->so_rcv.sb_hiwat / 4) &&
	    recwin < tp->t_maxseg)
		recwin = 0;
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
	    recwin < (tp->rcv_adv - tp->rcv_nxt))
		recwin = (tp->rcv_adv - tp->rcv_nxt);
	tw->last_win = (u_short)(recwin >> tp->rcv_scale);

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss     = tp->iss;
	tw->irs     = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever also used for the FIN_WAIT_2 state,
	 * then we may need a ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	soisdisconnected(so);
	tw->tw_so_options = so->so_options;
	inp->inp_flags |= INP_TIMEWAIT;
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	if (local)
		in_pcbdrop(inp);
	else {
		in_pcbref(inp);	/* Reference from tw */
		tw->tw_cred = crhold(so->so_cred);
		inp->inp_ppcb = tw;
		TCPSTATES_INC(TCPS_TIME_WAIT);
		tcp_tw_2msl_reset(tw, 0);
	}

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to __unused, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcbs whose timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *      RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt) {
		TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
		tcp_twrespond(tw, TH_ACK);
		goto dropnoprobe;
	}
drop:
	TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
dropnoprobe:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}

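/*
 * Tear down a TIME_WAIT connection: remove the tcptw from the 2MSL
 * queue, detach it from its inpcb, and release the socket and/or inpcb
 * as appropriate.  Called with the inpcb write-locked.
 */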
void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

	tcp_tw_2msl_stop(tw, reuse);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: either we own a
		 * strong reference, which we will now release, or we don't,
		 * in which case another reference exists (XXXRW: think
		 * about this more) and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else {
		/*
		 * The socket has already been cleaned up for us; only free
		 * the inpcb.
		 */
		in_pcbfree(inp);
	}
	TCPSTAT_INC(tcps_closed);
}

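/*
 * Transmit a control segment (typically an ACK) on behalf of a
 * TIME_WAIT connection, carrying the remembered sequence numbers,
 * window and, if negotiated, a timestamp option.
 */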
static int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
	hdrlen = 0;                     /* Keep compiler happy */

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6,
		    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		ip->ip_len = htons(m->m_pkthdr.len);
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
		TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}

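/*
 * (Re)arm the 2MSL timer for a tcptw and move it to the tail of the
 * timewait queue.
 */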
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tw->tw_inpcb);

	TW_WLOCK(V_tw_lock);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
	TW_WUNLOCK(V_tw_lock);
}

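/*
 * Remove a tcptw from the timewait queue, drop its credential and inpcb
 * references and, unless the caller wants to reuse it, free it.
 */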
static void
tcp_tw_2msl_stop(struct tcptw *tw, int reuse)
{
	struct ucred *cred;
	struct inpcb *inp;
	int released __unused;

	NET_EPOCH_ASSERT();

	TW_WLOCK(V_tw_lock);
	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;

	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	cred = tw->tw_cred;
	tw->tw_cred = NULL;
	TW_WUNLOCK(V_tw_lock);

	if (cred != NULL)
		crfree(cred);

	released = in_pcbrele_wlocked(inp);
	KASSERT(!released, ("%s: inp should not be released here", __func__));

	if (!reuse)
		uma_zfree(V_tcptw_zone, tw);
	TCPSTATES_DEC(TCPS_TIME_WAIT);
}

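/*
 * Walk the timewait queue from the head and close entries whose 2MSL
 * timer has expired.  If 'reuse' is set, the first entry is closed
 * unconditionally and returned to the caller instead of being freed.
 */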
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;
	struct inpcb *inp;

	NET_EPOCH_ASSERT();

	for (;;) {
		TW_RLOCK(V_tw_lock);
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0)) {
			TW_RUNLOCK(V_tw_lock);
			break;
		}
		KASSERT(tw->tw_inpcb != NULL, ("%s: tw->tw_inpcb == NULL",
		    __func__));

		inp = tw->tw_inpcb;
		in_pcbref(inp);
		TW_RUNLOCK(V_tw_lock);

		INP_WLOCK(inp);
		tw = intotw(inp);
		if (in_pcbrele_wlocked(inp)) {
			if (__predict_true(tw == NULL)) {
				continue;
			} else {
				/*
				 * This should not happen, as in the
				 * TIMEWAIT state the inp should not be
				 * destroyed before its tcptw.  If
				 * INVARIANTS is defined, panic.
				 */
#ifdef INVARIANTS
				panic("%s: Panic before an infinite "
					  "loop: INP_TIMEWAIT && (INP_FREED "
					  "|| inp last reference) && tw != "
					  "NULL", __func__);
#else
				log(LOG_ERR, "%s: Avoid an infinite "
					"loop: INP_TIMEWAIT && (INP_FREED "
					"|| inp last reference) && tw != "
					"NULL", __func__);
#endif
				break;
			}
		}

		if (tw == NULL) {
			/* tcp_twclose() has already been called */
			INP_WUNLOCK(inp);
			continue;
		}

		tcp_twclose(tw, reuse);
		if (reuse)
			return (tw);
	}

	return (NULL);
}