xref: /freebsd/sys/netinet/tcp_timewait.c (revision a2464ee12761660f50d0b6f59f233949ebcacc87)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
40 #include "opt_tcpdebug.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/callout.h>
45 #include <sys/kernel.h>
46 #include <sys/sysctl.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/syslog.h>
54 #include <sys/protosw.h>
55 #include <sys/random.h>
56 
57 #include <vm/uma.h>
58 
59 #include <net/route.h>
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/vnet.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/in_kdtrace.h>
66 #include <netinet/in_pcb.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in_var.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_icmp.h>
71 #include <netinet/ip_var.h>
72 #ifdef INET6
73 #include <netinet/ip6.h>
74 #include <netinet6/in6_pcb.h>
75 #include <netinet6/ip6_var.h>
76 #include <netinet6/scope6_var.h>
77 #include <netinet6/nd6.h>
78 #endif
79 #include <netinet/tcp.h>
80 #include <netinet/tcp_fsm.h>
81 #include <netinet/tcp_seq.h>
82 #include <netinet/tcp_timer.h>
83 #include <netinet/tcp_var.h>
84 #include <netinet/tcp_hpts.h>
85 #ifdef INET6
86 #include <netinet6/tcp6_var.h>
87 #endif
88 #include <netinet/tcpip.h>
89 #ifdef TCPDEBUG
90 #include <netinet/tcp_debug.h>
91 #endif
92 #ifdef INET6
93 #include <netinet6/ip6protosw.h>
94 #endif
95 
96 #include <netinet/udp.h>
97 #include <netinet/udp_var.h>
98 
99 #include <netipsec/ipsec_support.h>
100 
101 #include <machine/in_cksum.h>
102 
103 #include <security/mac/mac_framework.h>
104 
105 VNET_DEFINE_STATIC(uma_zone_t, tcptw_zone);
106 #define	V_tcptw_zone		VNET(tcptw_zone)
107 static int	maxtcptw;
108 
109 /*
110  * The timed wait queue contains references to each of the TCP sessions
111  * currently in the TIME_WAIT state.  The queue pointers, including the
112  * queue pointers in each tcptw structure, are protected using the global
113  * timewait lock, which must be held over queue iteration and modification.
114  *
115  * Rules on tcptw usage:
116  *  - an inpcb is always freed _after_ its tcptw
117  *  - a tcptw relies on its inpcb reference counting for memory stability
118  *  - a tcptw is dereferenceable only while its inpcb is locked
119  */
120 VNET_DEFINE_STATIC(TAILQ_HEAD(, tcptw), twq_2msl);
121 #define	V_twq_2msl		VNET(twq_2msl)
122 
123 /* Global timewait lock */
124 VNET_DEFINE_STATIC(struct rwlock, tw_lock);
125 #define	V_tw_lock		VNET(tw_lock)
126 
127 #define	TW_LOCK_INIT(tw, d)	rw_init_flags(&(tw), (d), 0)
128 #define	TW_LOCK_DESTROY(tw)	rw_destroy(&(tw))
129 #define	TW_RLOCK(tw)		rw_rlock(&(tw))
130 #define	TW_WLOCK(tw)		rw_wlock(&(tw))
131 #define	TW_RUNLOCK(tw)		rw_runlock(&(tw))
132 #define	TW_WUNLOCK(tw)		rw_wunlock(&(tw))
133 #define	TW_LOCK_ASSERT(tw)	rw_assert(&(tw), RA_LOCKED)
134 #define	TW_RLOCK_ASSERT(tw)	rw_assert(&(tw), RA_RLOCKED)
135 #define	TW_WLOCK_ASSERT(tw)	rw_assert(&(tw), RA_WLOCKED)
136 #define	TW_UNLOCK_ASSERT(tw)	rw_assert(&(tw), RA_UNLOCKED)
137 
138 static void	tcp_tw_2msl_reset(struct tcptw *, int);
139 static void	tcp_tw_2msl_stop(struct tcptw *, int);
140 static int	tcp_twrespond(struct tcptw *, int);
141 
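/*
 * Default cap on compressed TIME_WAIT entries when maxtcptw is not set:
 * half of the ephemeral port range, clamped to at least 32 and at most
 * maxsockets / 5.
 */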
142 static int
143 tcptw_auto_size(void)
144 {
145 	int halfrange;
146 
147 	/*
148 	 * Max out at half the ephemeral port range so that TIME_WAIT
149 	 * sockets don't tie up too many ephemeral ports.
150 	 */
151 	if (V_ipport_lastauto > V_ipport_firstauto)
152 		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
153 	else
154 		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
155 	/* Protect against goofy port ranges smaller than 32. */
156 	return (imin(imax(halfrange, 32), maxsockets / 5));
157 }
158 
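/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the effective limit
 * (the auto-sized value while maxtcptw is 0) and, on a write of at least
 * 32, updates maxtcptw and resizes the tcptw zone accordingly.
 */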
159 static int
160 sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
161 {
162 	int error, new;
163 
164 	if (maxtcptw == 0)
165 		new = tcptw_auto_size();
166 	else
167 		new = maxtcptw;
168 	error = sysctl_handle_int(oidp, &new, 0, req);
169 	if (error == 0 && req->newptr)
170 		if (new >= 32) {
171 			maxtcptw = new;
172 			uma_zone_set_max(V_tcptw_zone, maxtcptw);
173 		}
174 	return (error);
175 }
176 
177 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw,
178     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
179     &maxtcptw, 0, sysctl_maxtcptw, "IU",
180     "Maximum number of compressed TCP TIME_WAIT entries");
181 
182 VNET_DEFINE_STATIC(bool, nolocaltimewait) = true;
183 #define	V_nolocaltimewait	VNET(nolocaltimewait)
184 SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_VNET | CTLFLAG_RW,
185     &VNET_NAME(nolocaltimewait), true,
186     "Do not create compressed TCP TIME_WAIT entries for local connections");
187 
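/*
 * Recompute the tcptw zone limit when its auto-sizing inputs (presumably
 * maxsockets) may have changed; an explicitly configured maxtcptw is left
 * untouched.
 */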
188 void
189 tcp_tw_zone_change(void)
190 {
191 
192 	if (maxtcptw == 0)
193 		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
194 }
195 
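/*
 * Per-VNET initialization: create the tcptw zone, fetch the
 * net.inet.tcp.maxtcptw tunable, apply the configured or auto-sized zone
 * limit, and set up the 2MSL queue and the timewait lock.
 */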
196 void
197 tcp_tw_init(void)
198 {
199 
200 	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
201 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
202 	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
203 	if (maxtcptw == 0)
204 		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
205 	else
206 		uma_zone_set_max(V_tcptw_zone, maxtcptw);
207 	TAILQ_INIT(&V_twq_2msl);
208 	TW_LOCK_INIT(V_tw_lock, "tcptw");
209 }
210 
211 #ifdef VIMAGE
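/*
 * Per-VNET teardown: close any compressed TIME_WAIT connections still on
 * the 2MSL queue, then destroy the timewait lock and the tcptw zone.
 */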
212 void
213 tcp_tw_destroy(void)
214 {
215 	struct tcptw *tw;
216 	struct epoch_tracker et;
217 
218 	NET_EPOCH_ENTER(et);
219 	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
220 		tcp_twclose(tw, 0);
221 	NET_EPOCH_EXIT(et);
222 
223 	TW_LOCK_DESTROY(V_tw_lock);
224 	uma_zdestroy(V_tcptw_zone);
225 }
226 #endif
227 
228 /*
229  * Move a TCP connection into TIME_WAIT state.
230  *    tcbinfo is locked.
231  *    inp is locked, and is unlocked before returning.
232  */
233 void
234 tcp_twstart(struct tcpcb *tp)
235 {
236 	struct tcptw twlocal, *tw;
237 	struct inpcb *inp = tp->t_inpcb;
238 	struct socket *so;
239 	uint32_t recwin;
240 	bool acknow, local;
241 #ifdef INET6
242 	bool isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
243 #endif
244 
245 	NET_EPOCH_ASSERT();
246 	INP_WLOCK_ASSERT(inp);
247 
248 	/* A dropped inp should never transition to TIME_WAIT state. */
249 	KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("tcp_twstart: "
250 	    "(inp->inp_flags & INP_DROPPED) != 0"));
251 
252 	if (V_nolocaltimewait) {
253 #ifdef INET6
254 		if (isipv6)
255 			local = in6_localaddr(&inp->in6p_faddr);
256 		else
257 #endif
258 #ifdef INET
259 			local = in_localip(inp->inp_faddr);
260 #else
261 			local = false;
262 #endif
263 	} else
264 		local = false;
265 
266 	/*
267 	 * For use only by DTrace.  We do not reference the state
268 	 * after this point so modifying it in place is not a problem.
269 	 */
270 	tcp_state_change(tp, TCPS_TIME_WAIT);
271 
272 	if (local)
273 		tw = &twlocal;
274 	else
275 		tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
276 	if (tw == NULL) {
277 		/*
278 		 * Reached the limit on the total number of TIME_WAIT
279 		 * connections allowed.  Remove a connection from the
280 		 * TIME_WAIT queue in LRU fashion to make room for this
281 		 * connection.  If that fails, use the on-stack tw at least
282 		 * to be able to run through tcp_twrespond() and the standard
283 		 * tcpcb discard routine.
284 		 *
285 		 * XXX:  Check if it is possible to always have enough room
286 		 * in advance based on guarantees provided by uma_zalloc().
287 		 */
288 		tw = tcp_tw_2msl_scan(1);
289 		if (tw == NULL) {
290 			tw = &twlocal;
291 			local = true;
292 		}
293 	}
294 	/*
295 	 * In the !local case, the tcptw will hold a reference on its inpcb
296 	 * until tcp_twclose() is called.
297 	 */
298 	tw->tw_inpcb = inp;
299 
300 	/*
301 	 * Recover last window size sent.
302 	 */
303 	so = inp->inp_socket;
304 	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
305 	    (long)TCP_MAXWIN << tp->rcv_scale);
306 	if (recwin < (so->so_rcv.sb_hiwat / 4) &&
307 	    recwin < tp->t_maxseg)
308 		recwin = 0;
309 	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
310 	    recwin < (tp->rcv_adv - tp->rcv_nxt))
311 		recwin = (tp->rcv_adv - tp->rcv_nxt);
312 	tw->last_win = (u_short)(recwin >> tp->rcv_scale);
313 
314 	/*
315 	 * Set t_recent if timestamps are used on the connection.
316 	 */
317 	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
318 	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
319 		tw->t_recent = tp->ts_recent;
320 		tw->ts_offset = tp->ts_offset;
321 	} else {
322 		tw->t_recent = 0;
323 		tw->ts_offset = 0;
324 	}
325 
326 	tw->snd_nxt = tp->snd_nxt;
327 	tw->t_port = tp->t_port;
328 	tw->rcv_nxt = tp->rcv_nxt;
329 	tw->tw_time = 0;
330 	tw->tw_flags = tp->t_flags;
331 
332 	/*
333 	 * XXX: If this code is ever also used for the FIN_WAIT_2
334 	 * state, then we may need a ts_recent from the last
335 	 * segment.
336 	 */
337 	acknow = tp->t_flags & TF_ACKNOW;
338 
339 	/*
340 	 * First, discard tcpcb state, which includes stopping its timers and
341 	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
342 	 * that work is now done in the caller.
343 	 *
344 	 * Note: the soisdisconnected() call used to be made in
345 	 * tcp_discardcb(), and might not be needed here any longer.
346 	 */
347 #ifdef TCPHPTS
348 	tcp_hpts_remove(inp);
349 #endif
350 	tcp_discardcb(tp);
351 	soisdisconnected(so);
352 	tw->tw_so_options = so->so_options;
353 	inp->inp_flags |= INP_TIMEWAIT;
354 	if (acknow)
355 		tcp_twrespond(tw, TH_ACK);
356 	if (local)
357 		in_pcbdrop(inp);
358 	else {
359 		in_pcbref(inp);	/* Reference from tw */
360 		tw->tw_cred = crhold(so->so_cred);
361 		inp->inp_ppcb = tw;
362 		TCPSTATES_INC(TCPS_TIME_WAIT);
363 		tcp_tw_2msl_reset(tw, 0);
364 	}
365 
366 	/*
367 	 * If the inpcb owns the sole reference to the socket, then we can
368 	 * detach and free the socket as it is not needed in time wait.
369 	 */
370 	if (inp->inp_flags & INP_SOCKREF) {
371 		inp->inp_flags &= ~INP_SOCKREF;
372 		INP_WUNLOCK(inp);
373 		sorele(so);
374 	} else
375 		INP_WUNLOCK(inp);
376 }
377 
378 /*
379  * Returns 1 if the TIME_WAIT state was killed and we should start over,
380  * looking for a pcb in the listen state.  Returns 0 otherwise.
381  *
382  * For pure SYN segments the PCB shall be read-locked and the tcpopt pointer
383  * may be NULL.  Otherwise the PCB must be write-locked and tcpopt valid.
384  */
385 int
386 tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
387     struct mbuf *m, int tlen)
388 {
389 	struct tcptw *tw;
390 	char *s;
391 	int thflags;
392 	tcp_seq seq;
393 
394 	NET_EPOCH_ASSERT();
395 	INP_LOCK_ASSERT(inp);
396 
397 	/*
398 	 * XXXRW: Time wait state for the inpcb has been recycled, but the
399 	 * inpcb is still present.  This is undesirable, but temporarily
400 	 * necessary until we work out how to handle inpcbs whose timewait
401 	 * state has been removed.
402 	 */
403 	tw = intotw(inp);
404 	if (tw == NULL)
405 		goto drop;
406 
407 	thflags = tcp_get_flags(th);
408 #ifdef INVARIANTS
409 	if ((thflags & (TH_SYN | TH_ACK)) == TH_SYN)
410 		INP_RLOCK_ASSERT(inp);
411 	else {
412 		INP_WLOCK_ASSERT(inp);
413 		KASSERT(to != NULL,
414 		    ("%s: called without options on a non-SYN segment",
415 		    __func__));
416 	}
417 #endif
418 
419 	/*
420 	 * NOTE: for FIN_WAIT_2 (to be added later),
421 	 * we must validate the sequence number before accepting a RST.
422 	 */
423 
424 	/*
425 	 * If the segment contains RST:
426 	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
427 	 *      RFC 1337.
428 	 */
429 	if (thflags & TH_RST)
430 		goto drop;
431 
432 #if 0
433 /* PAWS not needed at the moment */
434 	/*
435 	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
436 	 * and it's less than ts_recent, drop it.
437 	 */
438 	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
439 	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
440 		if ((thflags & TH_ACK) == 0)
441 			goto drop;
442 		goto ack;
443 	}
444 	/*
445 	 * ts_recent is never updated because we never accept new segments.
446 	 */
447 #endif
448 
449 	/* Honor the drop_synfin sysctl variable. */
450 	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
451 		if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
452 			log(LOG_DEBUG, "%s; %s: "
453 			    "SYN|FIN segment ignored (based on "
454 			    "sysctl setting)\n", s, __func__);
455 			free(s, M_TCPLOG);
456 		}
457 		goto drop;
458 	}
459 
460 	/*
461 	 * If a new connection request is received
462 	 * while in TIME_WAIT, drop the old connection
463 	 * and start over if the sequence numbers
464 	 * are above the previous ones.
465 	 * Allow UDP port number changes in this case.
466 	 */
467 	if (((thflags & (TH_SYN | TH_ACK)) == TH_SYN) &&
468 	    SEQ_GT(th->th_seq, tw->rcv_nxt)) {
469 		/*
470 		 * If we can't upgrade our lock, just pretend we have lost
471 		 * this packet.
472 		 */
473 		if (INP_TRY_UPGRADE(inp) == 0)
474 			goto drop;
475 		tcp_twclose(tw, 0);
476 		TCPSTAT_INC(tcps_tw_recycles);
477 		return (1);
478 	}
479 
480 	/*
481 	 * Send RST if UDP port numbers don't match
482 	 */
483 	if (tw->t_port != m->m_pkthdr.tcp_tun_port) {
484 		if (tcp_get_flags(th) & TH_ACK) {
485 			tcp_respond(NULL, mtod(m, void *), th, m,
486 			    (tcp_seq)0, th->th_ack, TH_RST);
487 		} else {
488 			if (tcp_get_flags(th) & TH_SYN)
489 				tlen++;
490 			if (tcp_get_flags(th) & TH_FIN)
491 				tlen++;
492 			tcp_respond(NULL, mtod(m, void *), th, m,
493 			    th->th_seq+tlen, (tcp_seq)0, TH_RST|TH_ACK);
494 		}
495 		INP_UNLOCK(inp);
496 		TCPSTAT_INC(tcps_tw_resets);
497 		return (0);
498 	}
499 
500 	/*
501 	 * Drop the segment if it does not contain an ACK.
502 	 */
503 	if ((thflags & TH_ACK) == 0)
504 		goto drop;
505 
506 	INP_WLOCK_ASSERT(inp);
507 
508 	/*
509 	 * If timestamps were negotiated during SYN/ACK and a
510 	 * segment without a timestamp is received, silently drop
511 	 * the segment, unless the missing timestamps are tolerated.
512 	 * See section 3.2 of RFC 7323.
513 	 */
514 	if (((to->to_flags & TOF_TS) == 0) && (tw->t_recent != 0) &&
515 	    (V_tcp_tolerate_missing_ts == 0)) {
516 		goto drop;
517 	}
518 
519 	/*
520 	 * Reset the 2MSL timer if this is a duplicate FIN.
521 	 */
522 	if (thflags & TH_FIN) {
523 		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
524 		if (seq + 1 == tw->rcv_nxt)
525 			tcp_tw_2msl_reset(tw, 1);
526 	}
527 
528 	/*
529 	 * Acknowledge the segment if it has data or is not a duplicate ACK.
530 	 */
531 	if (thflags != TH_ACK || tlen != 0 ||
532 	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt) {
533 		TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
534 		tcp_twrespond(tw, TH_ACK);
535 		TCPSTAT_INC(tcps_tw_responds);
536 		goto dropnoprobe;
537 	}
538 drop:
539 	TCP_PROBE5(receive, NULL, NULL, m, NULL, th);
540 dropnoprobe:
541 	INP_UNLOCK(inp);
542 	m_freem(m);
543 	return (0);
544 }
545 
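/*
 * Tear down a compressed TIME_WAIT connection: stop its 2MSL timer, detach
 * the tcptw from its inpcb and drop the inpcb, then release or leave the
 * socket according to who holds the remaining references.  When 'reuse' is
 * set, the tcptw itself is not freed so the caller may recycle it.
 */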
546 void
547 tcp_twclose(struct tcptw *tw, int reuse)
548 {
549 	struct socket *so;
550 	struct inpcb *inp;
551 
552 	/*
553 	 * At this point, we are in one of two situations:
554 	 *
555 	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
556 	 *     all state.
557 	 *
558 	 * (2) We have a socket -- if we own a reference, release it and
559 	 *     notify the socket layer.
560 	 */
561 	inp = tw->tw_inpcb;
562 	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
563 	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
564 	NET_EPOCH_ASSERT();
565 	INP_WLOCK_ASSERT(inp);
566 
567 	tcp_tw_2msl_stop(tw, reuse);
568 	inp->inp_ppcb = NULL;
569 	in_pcbdrop(inp);
570 
571 	so = inp->inp_socket;
572 	if (so != NULL) {
573 		/*
574 		 * If there's a socket, handle two cases: either we own a
575 		 * strong reference, which we now release, or we don't, in
576 		 * which case another reference exists (XXXRW: think about
577 		 * this more) and we don't need to take action.
578 		 */
579 		if (inp->inp_flags & INP_SOCKREF) {
580 			inp->inp_flags &= ~INP_SOCKREF;
581 			INP_WUNLOCK(inp);
582 			sorele(so);
583 		} else {
584 			/*
585 			 * If we don't own the only reference, the socket and
586 			 * inpcb need to be left around to be handled by
587 			 * tcp_usr_detach() later.
588 			 */
589 			INP_WUNLOCK(inp);
590 		}
591 	} else {
592 		/*
593 		 * The socket has already been cleaned up for us; only free
594 		 * the inpcb.
595 		 */
596 		in_pcbfree(inp);
597 	}
598 	TCPSTAT_INC(tcps_closed);
599 }
600 
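/*
 * Build and send a bare segment (normally an ACK) on behalf of a connection
 * in TIME_WAIT, using the state stashed in the tcptw.  Handles IPv4 and
 * IPv6, optional UDP tunneling, timestamps and TCP-MD5 signatures.
 */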
601 static int
602 tcp_twrespond(struct tcptw *tw, int flags)
603 {
604 	struct inpcb *inp = tw->tw_inpcb;
605 #if defined(INET6) || defined(INET)
606 	struct tcphdr *th = NULL;
607 #endif
608 	struct mbuf *m;
609 #ifdef INET
610 	struct ip *ip = NULL;
611 #endif
612 	u_int hdrlen, optlen, ulen;
613 	int error = 0;			/* Keep compiler happy */
614 	struct tcpopt to;
615 #ifdef INET6
616 	struct ip6_hdr *ip6 = NULL;
617 	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
618 #endif
619 	struct udphdr *udp = NULL;
620 	hdrlen = 0;                     /* Keep compiler happy */
621 
622 	INP_WLOCK_ASSERT(inp);
623 
624 	m = m_gethdr(M_NOWAIT, MT_DATA);
625 	if (m == NULL)
626 		return (ENOBUFS);
627 	m->m_data += max_linkhdr;
628 
629 #ifdef MAC
630 	mac_inpcb_create_mbuf(inp, m);
631 #endif
632 
633 #ifdef INET6
634 	if (isipv6) {
635 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
636 		ip6 = mtod(m, struct ip6_hdr *);
637 		if (tw->t_port) {
638 			udp = (struct udphdr *)(ip6 + 1);
639 			hdrlen += sizeof(struct udphdr);
640 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
641 			udp->uh_dport = tw->t_port;
642 			ulen = (hdrlen - sizeof(struct ip6_hdr));
643 			th = (struct tcphdr *)(udp + 1);
644 		} else
645 			th = (struct tcphdr *)(ip6 + 1);
646 		tcpip_fillheaders(inp, tw->t_port, ip6, th);
647 	}
648 #endif
649 #if defined(INET6) && defined(INET)
650 	else
651 #endif
652 #ifdef INET
653 	{
654 		hdrlen = sizeof(struct tcpiphdr);
655 		ip = mtod(m, struct ip *);
656 		if (tw->t_port) {
657 			udp = (struct udphdr *)(ip + 1);
658 			hdrlen += sizeof(struct udphdr);
659 			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
660 			udp->uh_dport = tw->t_port;
661 			ulen = (hdrlen - sizeof(struct ip));
662 			th = (struct tcphdr *)(udp + 1);
663 		} else
664 			th = (struct tcphdr *)(ip + 1);
665 		tcpip_fillheaders(inp, tw->t_port, ip, th);
666 	}
667 #endif
668 	to.to_flags = 0;
669 
670 	/*
671 	 * Send a timestamp and echo reply if both our side and our peer
672 	 * have sent timestamps in our SYNs and this is not a RST.
673 	 */
674 	if (tw->t_recent && flags == TH_ACK) {
675 		to.to_flags |= TOF_TS;
676 		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
677 		to.to_tsecr = tw->t_recent;
678 	}
679 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
680 	if (tw->tw_flags & TF_SIGNATURE)
681 		to.to_flags |= TOF_SIGNATURE;
682 #endif
683 	optlen = tcp_addoptions(&to, (u_char *)(th + 1));
684 
685 	if (udp) {
686 		ulen += optlen;
687 		udp->uh_ulen = htons(ulen);
688 	}
689 	m->m_len = hdrlen + optlen;
690 	m->m_pkthdr.len = m->m_len;
691 
692 	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));
693 
694 	th->th_seq = htonl(tw->snd_nxt);
695 	th->th_ack = htonl(tw->rcv_nxt);
696 	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
697 	tcp_set_flags(th, flags);
698 	th->th_win = htons(tw->last_win);
699 
700 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
701 	if (tw->tw_flags & TF_SIGNATURE) {
702 		if (!TCPMD5_ENABLED() ||
703 		    TCPMD5_OUTPUT(m, th, to.to_signature) != 0)
704 			return (-1);
705 	}
706 #endif
707 #ifdef INET6
708 	if (isipv6) {
709 		if (tw->t_port) {
710 			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
711 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
712 			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
713 			th->th_sum = htons(0);
714 		} else {
715 			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
716 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
717 			th->th_sum = in6_cksum_pseudo(ip6,
718 			    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
719 		}
720 		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
721 		TCP_PROBE5(send, NULL, NULL, ip6, NULL, th);
722 		error = ip6_output(m, inp->in6p_outputopts, NULL,
723 		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
724 	}
725 #endif
726 #if defined(INET6) && defined(INET)
727 	else
728 #endif
729 #ifdef INET
730 	{
731 		if (tw->t_port) {
732 			m->m_pkthdr.csum_flags = CSUM_UDP;
733 			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
734 			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
735 			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
736 			th->th_sum = htons(0);
737 		} else {
738 			m->m_pkthdr.csum_flags = CSUM_TCP;
739 			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
740 			th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
741 			    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
742 		}
743 		ip->ip_len = htons(m->m_pkthdr.len);
744 		if (V_path_mtu_discovery)
745 			ip->ip_off |= htons(IP_DF);
746 		TCP_PROBE5(send, NULL, NULL, ip, NULL, th);
747 		error = ip_output(m, inp->inp_options, NULL,
748 		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
749 		    NULL, inp);
750 	}
751 #endif
752 	if (flags & TH_ACK)
753 		TCPSTAT_INC(tcps_sndacks);
754 	else
755 		TCPSTAT_INC(tcps_sndctrl);
756 	TCPSTAT_INC(tcps_sndtotal);
757 	return (error);
758 }
759 
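/*
 * (Re)arm the 2MSL expiry of a tcptw: if 'rearm' is set the entry is first
 * removed from the queue, then it is given a fresh timeout of 2 * V_tcp_msl
 * ticks and placed at the tail of the per-VNET TIME_WAIT queue.
 */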
760 static void
761 tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
762 {
763 
764 	NET_EPOCH_ASSERT();
765 	INP_WLOCK_ASSERT(tw->tw_inpcb);
766 
767 	TW_WLOCK(V_tw_lock);
768 	if (rearm)
769 		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
770 	tw->tw_time = ticks + 2 * V_tcp_msl;
771 	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
772 	TW_WUNLOCK(V_tw_lock);
773 }
774 
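/*
 * Remove a tcptw from the TIME_WAIT queue and drop the resources it holds:
 * its credential reference and its reference on the inpcb.  Unless 'reuse'
 * is set, the tcptw is also returned to the zone.
 */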
775 static void
776 tcp_tw_2msl_stop(struct tcptw *tw, int reuse)
777 {
778 	struct ucred *cred;
779 	struct inpcb *inp;
780 	int released __unused;
781 
782 	NET_EPOCH_ASSERT();
783 
784 	TW_WLOCK(V_tw_lock);
785 	inp = tw->tw_inpcb;
786 	tw->tw_inpcb = NULL;
787 
788 	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
789 	cred = tw->tw_cred;
790 	tw->tw_cred = NULL;
791 	TW_WUNLOCK(V_tw_lock);
792 
793 	if (cred != NULL)
794 		crfree(cred);
795 
796 	released = in_pcbrele_wlocked(inp);
797 	KASSERT(!released, ("%s: inp should not be released here", __func__));
798 
799 	if (!reuse)
800 		uma_zfree(V_tcptw_zone, tw);
801 	TCPSTATES_DEC(TCPS_TIME_WAIT);
802 }
803 
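/*
 * Walk the TIME_WAIT queue from its head (the oldest entries) and close
 * connections whose 2MSL timer has expired.  With 'reuse' set, the head
 * entry is closed unconditionally and its tcptw is returned to the caller
 * for recycling; otherwise NULL is returned once no expired entry remains.
 */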
804 struct tcptw *
805 tcp_tw_2msl_scan(int reuse)
806 {
807 	struct tcptw *tw;
808 	struct inpcb *inp;
809 
810 	NET_EPOCH_ASSERT();
811 
812 	for (;;) {
813 		TW_RLOCK(V_tw_lock);
814 		tw = TAILQ_FIRST(&V_twq_2msl);
815 		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0)) {
816 			TW_RUNLOCK(V_tw_lock);
817 			break;
818 		}
819 		KASSERT(tw->tw_inpcb != NULL, ("%s: tw->tw_inpcb == NULL",
820 		    __func__));
821 
822 		inp = tw->tw_inpcb;
823 		in_pcbref(inp);
824 		TW_RUNLOCK(V_tw_lock);
825 
826 		INP_WLOCK(inp);
827 		tw = intotw(inp);
828 		if (in_pcbrele_wlocked(inp)) {
829 			if (__predict_true(tw == NULL)) {
830 				continue;
831 			} else {
832 				/* This should not happen, as in TIME_WAIT
833 				 * state the inp should not be destroyed
834 				 * before its tcptw.  If INVARIANTS is
835 				 * defined, panic.
836 				 */
837 #ifdef INVARIANTS
838 				panic("%s: Panic before an infinite "
839 					  "loop: INP_TIMEWAIT && (INP_FREED "
840 					  "|| inp last reference) && tw != "
841 					  "NULL", __func__);
842 #else
843 				log(LOG_ERR, "%s: Avoid an infinite "
844 					"loop: INP_TIMEWAIT && (INP_FREED "
845 					"|| inp last reference) && tw != "
846 					"NULL\n", __func__);
847 #endif
848 				break;
849 			}
850 		}
851 
852 		if (tw == NULL) {
853 			/* tcp_twclose() has already been called */
854 			INP_WUNLOCK(inp);
855 			continue;
856 		}
857 
858 		tcp_twclose(tw, reuse);
859 		if (reuse)
860 			return (tw);
861 	}
862 
863 	return (NULL);
864 }
865