xref: /freebsd/sys/netinet/tcp_timewait.c (revision ce74223a366b59bb7ea8febef9e4db1fc700136a)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifndef INVARIANTS
#include <sys/syslog.h>
#endif
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE_STATIC(uma_zone_t, tcptw_zone);
#define	V_tcptw_zone		VNET(tcptw_zone)
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * timewait lock, which must be held over queue iteration and modification.
 *
 * Rules on tcptw usage:
 *  - an inpcb is always freed _after_ its tcptw
 *  - a tcptw relies on its inpcb reference counting for memory stability
 *  - a tcptw is dereferenceable only while its inpcb is locked
 */
VNET_DEFINE_STATIC(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl		VNET(twq_2msl)

/* Global timewait lock */
VNET_DEFINE_STATIC(struct rwlock, tw_lock);
#define	V_tw_lock		VNET(tw_lock)

#define	TW_LOCK_INIT(tw, d)	rw_init_flags(&(tw), (d), 0)
#define	TW_LOCK_DESTROY(tw)	rw_destroy(&(tw))
#define	TW_RLOCK(tw)		rw_rlock(&(tw))
#define	TW_WLOCK(tw)		rw_wlock(&(tw))
#define	TW_RUNLOCK(tw)		rw_runlock(&(tw))
#define	TW_WUNLOCK(tw)		rw_wunlock(&(tw))
#define	TW_LOCK_ASSERT(tw)	rw_assert(&(tw), RA_LOCKED)
#define	TW_RLOCK_ASSERT(tw)	rw_assert(&(tw), RA_RLOCKED)
#define	TW_WLOCK_ASSERT(tw)	rw_assert(&(tw), RA_WLOCKED)
#define	TW_UNLOCK_ASSERT(tw)	rw_assert(&(tw), RA_UNLOCKED)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *, int);
static int	tcp_twrespond(struct tcptw *, int);

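/*
 * Pick a default limit for the tcptw zone when the maxtcptw tunable is
 * not set: half of the ephemeral port range, bounded below by 32 and
 * above by a fifth of maxsockets.
 */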
static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

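/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the effective limit
 * (auto-sized when maxtcptw is 0) and, on a write of at least 32, stores
 * the new value and applies it to the tcptw zone.
 */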
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE_STATIC(bool, nolocaltimewait) = true;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), true,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

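/*
 * Re-apply the auto-sized zone limit, unless an explicit maxtcptw value
 * has been configured.
 */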
void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

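/*
 * Per-VNET initialization: create the tcptw zone, apply the configured or
 * auto-sized limit, and set up the TIME_WAIT queue and its lock.
 */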
void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
	TW_LOCK_INIT(V_tw_lock, "tcptw");
}

#ifdef VIMAGE
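/*
 * Per-VNET teardown: close any remaining TIME_WAIT entries, then destroy
 * the timewait lock and the tcptw zone.
 */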
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	NET_EPOCH_EXIT(et);

	TW_LOCK_DESTROY(V_tw_lock);
	uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw twlocal, *tw;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so;
	uint32_t recwin;
	bool acknow, local;
#ifdef INET6
	bool isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

	/* A dropped inp should never transition to TIME_WAIT state. */
	KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("tcp_twstart: "
	    "(inp->inp_flags & INP_DROPPED) != 0"));

	if (V_nolocaltimewait) {
#ifdef INET6
		if (isipv6)
			local = in6_localaddr(&inp->in6p_faddr);
		else
#endif
#ifdef INET
			local = in_localip(inp->inp_faddr);
#else
			local = false;
#endif
	} else
		local = false;

	/*
	 * For use only by DTrace.  We do not reference the state
	 * after this point so modifying it in place is not a problem.
	 */
	tcp_state_change(tp, TCPS_TIME_WAIT);

	if (local)
		tw = &twlocal;
	else
		tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		/*
		 * We have reached the limit on the total number of
		 * TIME_WAIT connections allowed.  Remove a connection
		 * from the TIME_WAIT queue in LRU fashion to make room
		 * for this one.  If that fails, fall back to the
		 * on-stack tw so that we can at least run through
		 * tcp_twrespond() and the standard tcpcb discard
		 * routine.
		 *
		 * XXX: Check whether it is possible to always have
		 * enough room in advance based on guarantees provided
		 * by uma_zalloc().
		 */
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tw = &twlocal;
			local = true;
		}
	}
	/*
	 * In the !local case the tcptw will hold a reference on its
	 * inpcb until tcp_twclose() is called.
	 */
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	so = inp->inp_socket;
	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);
	if (recwin < (so->so_rcv.sb_hiwat / 4) &&
	    recwin < tp->t_maxseg)
		recwin = 0;
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
	    recwin < (tp->rcv_adv - tp->rcv_nxt))
		recwin = (tp->rcv_adv - tp->rcv_nxt);
	tw->last_win = (u_short)(recwin >> tp->rcv_scale);

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->t_port = tp->t_port;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss     = tp->iss;
	tw->irs     = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever also used for the FIN_WAIT_2 state,
	 * we may need a ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	soisdisconnected(so);
	tw->tw_so_options = so->so_options;
	inp->inp_flags |= INP_TIMEWAIT;
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	if (local)
		in_pcbdrop(inp);
	else {
		in_pcbref(inp);	/* Reference from tw */
		tw->tw_cred = crhold(so->so_cred);
		inp->inp_ppcb = tw;
		TCPSTATES_INC(TCPS_TIME_WAIT);
		tcp_tw_2msl_reset(tw, 0);
	}

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 *
 * For pure SYN segments the PCB shall be read-locked and the tcpopt pointer
 * may be NULL.  For all other segments the PCB shall be write-locked and
 * the tcpopt pointer must be valid.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	NET_EPOCH_ASSERT();
	INP_LOCK_ASSERT(inp);

	/*
	 * XXXRW: The time wait state for the inpcb has been recycled, but
	 * the inpcb is still present.  This is undesirable, but temporarily
	 * necessary until we work out how to handle inpcbs whose timewait
	 * state has been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;
#ifdef INVARIANTS
	if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN)
		INP_RLOCK_ASSERT(inp);
	else {
		INP_WLOCK_ASSERT(inp);
		KASSERT(to != NULL,
		    ("%s: called without options on a non-SYN segment",
		    __func__));
	}
#endif

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *      RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 * Allow UDP port number changes in this case.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		/*
		 * In case we can't upgrade our lock just pretend we have
		 * lost this packet.
		 */
		if (((thflags & (TH_SYN | TH_ACK)) == TH_SYN) &&
		    INP_TRY_UPGRADE(inp) == 0)
			goto drop;
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Send RST if UDP port numbers don't match
	 */
	if (tw->t_port != m->m_pkthdr.tcp_tun_port) {
		if (th->th_flags & TH_ACK) {
			tcp_respond(NULL, mtod(m, void *), th, m,
			    (tcp_seq)0, th->th_ack, TH_RST);
		} else {
			if (th->th_flags & TH_SYN)
				tlen++;
			if (th->th_flags & TH_FIN)
				tlen++;
			tcp_respond(NULL, mtod(m, void *), th, m,
			    th->th_seq+tlen, (tcp_seq)0, TH_RST|TH_ACK);
		}
		INP_UNLOCK(inp);
		return (0);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	INP_WLOCK_ASSERT(inp);

	/*
	 * If timestamps were negotiated during SYN/ACK and a
	 * segment without a timestamp is received, silently drop
	 * the segment, unless the missing timestamps are tolerated.
	 * See section 3.2 of RFC 7323.
	 */
	if (((to->to_flags & TOF_TS) == 0) && (tw->t_recent != 0) &&
	    (V_tcp_tolerate_missing_ts == 0)) {
		goto drop;
	}

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt) {
		TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
		tcp_twrespond(tw, TH_ACK);
		goto dropnoprobe;
	}
drop:
	TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
dropnoprobe:
	INP_UNLOCK(inp);
	m_freem(m);
	return (0);
}

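/*
 * Tear down a TIME_WAIT connection: stop the 2MSL timer, detach the tcptw
 * from its inpcb, and dispose of the socket and/or inpcb as described
 * below.
 */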
void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

	tcp_tw_2msl_stop(tw, reuse);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: either we own a
		 * strong reference, which we now release, or we don't, in
		 * which case another reference exists (XXXRW: think about
		 * this more) and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else {
		/*
		 * The socket has already been cleaned up for us; only free
		 * the inpcb.
		 */
		in_pcbfree(inp);
	}
	TCPSTAT_INC(tcps_closed);
}

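/*
 * Send a control segment (normally an ACK) on behalf of a connection in
 * TIME_WAIT state, rebuilding the IPv4/IPv6 and optional UDP tunnel
 * headers from the compressed tcptw state.
 */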
static int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen, ulen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
	struct udphdr *udp = NULL;
	hdrlen = 0;                     /* Keep compiler happy */

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		if (tw->t_port) {
			udp = (struct udphdr *)(ip6 + 1);
			hdrlen += sizeof(struct udphdr);
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tw->t_port;
			ulen = (hdrlen - sizeof(struct ip6_hdr));
			th = (struct tcphdr *)(udp + 1);
		} else
			th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, tw->t_port, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		if (tw->t_port) {
			udp = (struct udphdr *)(ip + 1);
			hdrlen += sizeof(struct udphdr);
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tw->t_port;
			ulen = (hdrlen - sizeof(struct ip));
			th = (struct tcphdr *)(udp + 1);
		} else
			th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, tw->t_port, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	if (udp) {
		ulen += optlen;
		udp->uh_ulen = htons(ulen);
	}
	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		if (tw->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
		}
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		if (tw->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
			    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		}
		ip->ip_len = htons(m->m_pkthdr.len);
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
		TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}

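/*
 * (Re)arm the 2MSL timer for a tcptw: stamp its expiry time and move it to
 * the tail of the timewait queue.
 */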
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tw->tw_inpcb);

	TW_WLOCK(V_tw_lock);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
	TW_WUNLOCK(V_tw_lock);
}

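/*
 * Remove a tcptw from the timewait queue, drop its credential and inpcb
 * references, and free it unless the caller intends to reuse it.
 */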
static void
tcp_tw_2msl_stop(struct tcptw *tw, int reuse)
{
	struct ucred *cred;
	struct inpcb *inp;
	int released __unused;

	NET_EPOCH_ASSERT();

	TW_WLOCK(V_tw_lock);
	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;

	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	cred = tw->tw_cred;
	tw->tw_cred = NULL;
	TW_WUNLOCK(V_tw_lock);

	if (cred != NULL)
		crfree(cred);

	released = in_pcbrele_wlocked(inp);
	KASSERT(!released, ("%s: inp should not be released here", __func__));

	if (!reuse)
		uma_zfree(V_tcptw_zone, tw);
	TCPSTATES_DEC(TCPS_TIME_WAIT);
}

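/*
 * Walk the timewait queue and close expired entries.  If reuse is
 * non-zero, close the oldest entry regardless of expiry and return it so
 * the caller can recycle the allocation; otherwise return NULL.
 */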
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;
	struct inpcb *inp;

	NET_EPOCH_ASSERT();

	for (;;) {
		TW_RLOCK(V_tw_lock);
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0)) {
			TW_RUNLOCK(V_tw_lock);
			break;
		}
		KASSERT(tw->tw_inpcb != NULL, ("%s: tw->tw_inpcb == NULL",
		    __func__));

		inp = tw->tw_inpcb;
		in_pcbref(inp);
		TW_RUNLOCK(V_tw_lock);

		INP_WLOCK(inp);
		tw = intotw(inp);
		if (in_pcbrele_wlocked(inp)) {
			if (__predict_true(tw == NULL)) {
				continue;
			} else {
				/*
				 * This should not happen, as in TIME_WAIT
				 * state the inp should not be destroyed
				 * before its tcptw.  If INVARIANTS is
				 * defined, panic.
				 */
#ifdef INVARIANTS
				panic("%s: Panic before an infinite "
					  "loop: INP_TIMEWAIT && (INP_FREED "
					  "|| inp last reference) && tw != "
					  "NULL", __func__);
#else
				log(LOG_ERR, "%s: Avoid an infinite "
					"loop: INP_TIMEWAIT && (INP_FREED "
					"|| inp last reference) && tw != "
					"NULL", __func__);
#endif
				break;
			}
		}

		if (tw == NULL) {
			/* tcp_twclose() has already been called */
			INP_WUNLOCK(inp);
			continue;
		}

		tcp_twclose(tw, reuse);
		if (reuse)
			return tw;
	}

	return NULL;
}
843