xref: /freebsd/sys/netinet/tcp_timewait.c (revision ba54cdcdda639bebc917b1796ecbc35a83ff8625)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
30  * $FreeBSD$
31  */
32 
33 #include "opt_compat.h"
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #include "opt_ipsec.h"
37 #include "opt_mac.h"
38 #include "opt_tcpdebug.h"
39 #include "opt_tcp_sack.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/kernel.h>
45 #include <sys/sysctl.h>
46 #include <sys/mac.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #ifdef INET6
50 #include <sys/domain.h>
51 #endif
52 #include <sys/proc.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 #include <sys/protosw.h>
56 #include <sys/random.h>
57 
58 #include <vm/uma.h>
59 
60 #include <net/route.h>
61 #include <net/if.h>
62 
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/ip.h>
66 #ifdef INET6
67 #include <netinet/ip6.h>
68 #endif
69 #include <netinet/in_pcb.h>
70 #ifdef INET6
71 #include <netinet6/in6_pcb.h>
72 #endif
73 #include <netinet/in_var.h>
74 #include <netinet/ip_var.h>
75 #ifdef INET6
76 #include <netinet6/ip6_var.h>
77 #include <netinet6/nd6.h>
78 #endif
79 #include <netinet/tcp.h>
80 #include <netinet/tcp_fsm.h>
81 #include <netinet/tcp_seq.h>
82 #include <netinet/tcp_timer.h>
83 #include <netinet/tcp_var.h>
84 #ifdef INET6
85 #include <netinet6/tcp6_var.h>
86 #endif
87 #include <netinet/tcpip.h>
88 #ifdef TCPDEBUG
89 #include <netinet/tcp_debug.h>
90 #endif
91 #include <netinet6/ip6protosw.h>
92 
93 #ifdef IPSEC
94 #include <netinet6/ipsec.h>
95 #ifdef INET6
96 #include <netinet6/ipsec6.h>
97 #endif
98 #include <netkey/key.h>
99 #endif /*IPSEC*/
100 
101 #ifdef FAST_IPSEC
102 #include <netipsec/ipsec.h>
103 #include <netipsec/xform.h>
104 #ifdef INET6
105 #include <netipsec/ipsec6.h>
106 #endif
107 #include <netipsec/key.h>
108 #define	IPSEC
109 #endif /*FAST_IPSEC*/
110 
111 #include <machine/in_cksum.h>
112 #include <sys/md5.h>
113 
114 int	tcp_mssdflt = TCP_MSS;
115 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
116     &tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
117 
118 #ifdef INET6
119 int	tcp_v6mssdflt = TCP6_MSS;
120 SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
121 	CTLFLAG_RW, &tcp_v6mssdflt , 0,
122 	"Default TCP Maximum Segment Size for IPv6");
123 #endif
124 
125 /*
126  * Minimum MSS we accept and use. This prevents DoS attacks where
127  * we are forced to a ridiculously low MSS like 20 and send hundreds
128  * of packets instead of one. The effect scales with the available
129  * bandwidth and quickly saturates the CPU and network interface
130  * with packet generation and sending. Set to zero to disable MINMSS
131  * checking. This setting prevents us from sending too small packets.
132  */
133 int	tcp_minmss = TCP_MINMSS;
134 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
135     &tcp_minmss , 0, "Minimum TCP Maximum Segment Size");
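/*
 * Illustrative arithmetic for the DoS concern above: transferring one
 * megabyte with a 20-byte MSS takes over 52,000 segments, each carrying
 * 40+ bytes of IP/TCP header, versus roughly 700 segments at a
 * 1460-byte MSS.
 */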
136 /*
137  * Number of TCP segments per second we accept from a remote host
138  * before we start to calculate the average segment size. If the average
139  * segment size drops below the minimum TCP MSS we assume a DoS
140  * attack and reset+drop the connection. Care has to be taken not to
141  * set this value too low, or interactive connections (telnet, SSH),
142  * which send many small packets, may be killed.
143  */
144 int     tcp_minmssoverload = TCP_MINMSSOVERLOAD;
145 SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
146     &tcp_minmssoverload , 0, "Number of TCP Segments per Second allowed to "
147     "be under the MINMSS Size");
148 
149 #if 0
150 static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
151 SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
152     &tcp_rttdflt , 0, "Default maximum TCP Round Trip Time");
153 #endif
154 
155 int	tcp_do_rfc1323 = 1;
156 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
157     &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
158 
159 static int	tcp_tcbhashsize = 0;
160 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
161      &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
162 
163 static int	do_tcpdrain = 1;
164 SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
165      "Enable tcp_drain routine for extra help when low on mbufs");
166 
167 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
168     &tcbinfo.ipi_count, 0, "Number of active PCBs");
169 
170 static int	icmp_may_rst = 1;
171 SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
172     "Certain ICMP unreachable messages may abort connections in SYN_SENT");
173 
174 static int	tcp_isn_reseed_interval = 0;
175 SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
176     &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
177 
178 /*
179  * TCP bandwidth limiting sysctls.  Note that the default lower bound of
180  * 1024 exists only for debugging.  A good production default would be
181  * something like 6100.
182  */
183 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
184     "TCP inflight data limiting");
185 
186 static int	tcp_inflight_enable = 1;
187 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
188     &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");
189 
190 static int	tcp_inflight_debug = 0;
191 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
192     &tcp_inflight_debug, 0, "Debug TCP inflight calculations");
193 
194 static int	tcp_inflight_min = 6144;
195 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
196     &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");
197 
198 static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
199 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
200     &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");
201 
202 static int	tcp_inflight_stab = 20;
203 SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
204     &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");
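/*
 * As the description above suggests, the stab value is interpreted in
 * tenths of a segment, so the default of 20 corresponds to two segments
 * of slop added to the computed inflight window.
 */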
205 
206 uma_zone_t sack_hole_zone;
207 
208 static struct inpcb *tcp_notify(struct inpcb *, int);
209 static void	tcp_discardcb(struct tcpcb *);
210 static void	tcp_isn_tick(void *);
211 
212 /*
213  * Target size of TCP PCB hash tables. Must be a power of two.
214  *
215  * Note that this can be overridden by the kernel environment
216  * variable net.inet.tcp.tcbhashsize
217  */
218 #ifndef TCBHASHSIZE
219 #define TCBHASHSIZE	512
220 #endif
221 
222 /*
223  * XXX
224  * Callouts should be moved into struct tcp directly.  They are currently
225  * separate because the tcpcb structure is exported to userland for sysctl
226  * parsing, and the userland consumers do not know about callouts.
227  */
228 struct	tcpcb_mem {
229 	struct	tcpcb tcb;
230 	struct	callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
231 	struct	callout tcpcb_mem_2msl, tcpcb_mem_delack;
232 };
233 
234 static uma_zone_t tcpcb_zone;
235 static uma_zone_t tcptw_zone;
236 struct callout isn_callout;
237 
238 /*
239  * Tcp initialization
240  */
241 void
242 tcp_init()
243 {
244 	int hashsize = TCBHASHSIZE;
245 
246 	tcp_delacktime = TCPTV_DELACK;
247 	tcp_keepinit = TCPTV_KEEP_INIT;
248 	tcp_keepidle = TCPTV_KEEP_IDLE;
249 	tcp_keepintvl = TCPTV_KEEPINTVL;
250 	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
251 	tcp_msl = TCPTV_MSL;
252 	tcp_rexmit_min = TCPTV_MIN;
253 	tcp_rexmit_slop = TCPTV_CPU_VAR;
254 
255 	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
256 	LIST_INIT(&tcb);
257 	tcbinfo.listhead = &tcb;
258 	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
259 	if (!powerof2(hashsize)) {
260 		printf("WARNING: TCB hash size not a power of 2\n");
261 		hashsize = 512; /* safe default */
262 	}
263 	tcp_tcbhashsize = hashsize;
264 	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
265 	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
266 					&tcbinfo.porthashmask);
267 	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
268 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
269 	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
270 #ifdef INET6
271 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
272 #else /* INET6 */
273 #define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
274 #endif /* INET6 */
275 	if (max_protohdr < TCP_MINPROTOHDR)
276 		max_protohdr = TCP_MINPROTOHDR;
277 	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
278 		panic("tcp_init");
279 #undef TCP_MINPROTOHDR
280 	/*
281 	 * These have to be type stable for the benefit of the timers.
282 	 */
283 	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
284 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
285 	uma_zone_set_max(tcpcb_zone, maxsockets);
286 	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
287 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
288 	uma_zone_set_max(tcptw_zone, maxsockets / 5);
289 	tcp_timer_init();
290 	syncache_init();
291 	tcp_hc_init();
292 	tcp_reass_init();
293 	callout_init(&isn_callout, CALLOUT_MPSAFE);
294 	tcp_isn_tick(NULL);
295 	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
296 		SHUTDOWN_PRI_DEFAULT);
297 	sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
298 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
299 }
300 
301 void
302 tcp_fini(xtp)
303 	void *xtp;
304 {
305 	callout_stop(&isn_callout);
306 
307 }
308 
309 /*
310  * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
311  * tcp_template used to store this data in mbufs, but we now recopy it out
312  * of the tcpcb each time to conserve mbufs.
313  */
314 void
315 tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
316 	struct inpcb *inp;
317 	void *ip_ptr;
318 	void *tcp_ptr;
319 {
320 	struct tcphdr *th = (struct tcphdr *)tcp_ptr;
321 
322 	INP_LOCK_ASSERT(inp);
323 
324 #ifdef INET6
325 	if ((inp->inp_vflag & INP_IPV6) != 0) {
326 		struct ip6_hdr *ip6;
327 
328 		ip6 = (struct ip6_hdr *)ip_ptr;
329 		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
330 			(inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
331 		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
332 			(IPV6_VERSION & IPV6_VERSION_MASK);
333 		ip6->ip6_nxt = IPPROTO_TCP;
334 		ip6->ip6_plen = sizeof(struct tcphdr);
335 		ip6->ip6_src = inp->in6p_laddr;
336 		ip6->ip6_dst = inp->in6p_faddr;
337 	} else
338 #endif
339 	{
340 		struct ip *ip;
341 
342 		ip = (struct ip *)ip_ptr;
343 		ip->ip_v = IPVERSION;
344 		ip->ip_hl = 5;
345 		ip->ip_tos = inp->inp_ip_tos;
346 		ip->ip_len = 0;
347 		ip->ip_id = 0;
348 		ip->ip_off = 0;
349 		ip->ip_ttl = inp->inp_ip_ttl;
350 		ip->ip_sum = 0;
351 		ip->ip_p = IPPROTO_TCP;
352 		ip->ip_src = inp->inp_laddr;
353 		ip->ip_dst = inp->inp_faddr;
354 	}
355 	th->th_sport = inp->inp_lport;
356 	th->th_dport = inp->inp_fport;
357 	th->th_seq = 0;
358 	th->th_ack = 0;
359 	th->th_x2 = 0;
360 	th->th_off = 5;
361 	th->th_flags = 0;
362 	th->th_win = 0;
363 	th->th_urp = 0;
364 	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
365 }
366 
367 /*
368  * Create template to be used to send tcp packets on a connection.
369  * Allocates an mbuf and fills in a skeletal tcp/ip header.  The only
370  * use for this function is in keepalives, which use tcp_respond.
371  */
372 struct tcptemp *
373 tcpip_maketemplate(inp)
374 	struct inpcb *inp;
375 {
376 	struct mbuf *m;
377 	struct tcptemp *n;
378 
379 	m = m_get(M_DONTWAIT, MT_HEADER);
380 	if (m == NULL)
381 		return (0);
382 	m->m_len = sizeof(struct tcptemp);
383 	n = mtod(m, struct tcptemp *);
384 
385 	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
386 	return (n);
387 }
388 
389 /*
390  * Send a single message to the TCP at address specified by
391  * the given TCP/IP header.  If m == NULL, then we make a copy
392  * of the tcpiphdr at ti and send directly to the addressed host.
393  * This is used to force keep alive messages out using the TCP
394  * template for a connection.  If flags are given then we send
395  * a message back to the TCP which originated the segment ti,
396  * and discard the mbuf containing it and any other attached mbufs.
397  *
398  * In any case the ack and sequence number of the transmitted
399  * segment are as specified by the parameters.
400  *
401  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
402  */
403 void
404 tcp_respond(tp, ipgen, th, m, ack, seq, flags)
405 	struct tcpcb *tp;
406 	void *ipgen;
407 	register struct tcphdr *th;
408 	register struct mbuf *m;
409 	tcp_seq ack, seq;
410 	int flags;
411 {
412 	register int tlen;
413 	int win = 0;
414 	struct ip *ip;
415 	struct tcphdr *nth;
416 #ifdef INET6
417 	struct ip6_hdr *ip6;
418 	int isipv6;
419 #endif /* INET6 */
420 	int ipflags = 0;
421 	struct inpcb *inp;
422 
423 	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
424 
425 #ifdef INET6
426 	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
427 	ip6 = ipgen;
428 #endif /* INET6 */
429 	ip = ipgen;
430 
431 	if (tp != NULL) {
432 		inp = tp->t_inpcb;
433 		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
434 		INP_INFO_WLOCK_ASSERT(&tcbinfo);
435 		INP_LOCK_ASSERT(inp);
436 	} else
437 		inp = NULL;
438 
439 	if (tp != NULL) {
440 		if (!(flags & TH_RST)) {
441 			win = sbspace(&inp->inp_socket->so_rcv);
442 			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
443 				win = (long)TCP_MAXWIN << tp->rcv_scale;
444 		}
445 	}
446 	if (m == NULL) {
447 		m = m_gethdr(M_DONTWAIT, MT_HEADER);
448 		if (m == NULL)
449 			return;
450 		tlen = 0;
451 		m->m_data += max_linkhdr;
452 #ifdef INET6
453 		if (isipv6) {
454 			bcopy((caddr_t)ip6, mtod(m, caddr_t),
455 			      sizeof(struct ip6_hdr));
456 			ip6 = mtod(m, struct ip6_hdr *);
457 			nth = (struct tcphdr *)(ip6 + 1);
458 		} else
459 #endif /* INET6 */
460 	      {
461 		bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
462 		ip = mtod(m, struct ip *);
463 		nth = (struct tcphdr *)(ip + 1);
464 	      }
465 		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
466 		flags = TH_ACK;
467 	} else {
468 		m_freem(m->m_next);
469 		m->m_next = NULL;
470 		m->m_data = (caddr_t)ipgen;
471 		/* m_len is set later */
472 		tlen = 0;
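/* Exchange two lvalues of the given type (used to swap the source and destination fields). */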
473 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
474 #ifdef INET6
475 		if (isipv6) {
476 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
477 			nth = (struct tcphdr *)(ip6 + 1);
478 		} else
479 #endif /* INET6 */
480 	      {
481 		xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
482 		nth = (struct tcphdr *)(ip + 1);
483 	      }
484 		if (th != nth) {
485 			/*
486 			 * this is usually a case when an extension header
487 			 * This usually happens when an extension header
488 			 * TCP header.
489 			 */
490 			nth->th_sport = th->th_sport;
491 			nth->th_dport = th->th_dport;
492 		}
493 		xchg(nth->th_dport, nth->th_sport, n_short);
494 #undef xchg
495 	}
496 #ifdef INET6
497 	if (isipv6) {
498 		ip6->ip6_flow = 0;
499 		ip6->ip6_vfc = IPV6_VERSION;
500 		ip6->ip6_nxt = IPPROTO_TCP;
501 		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
502 						tlen));
503 		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
504 	} else
505 #endif
506 	{
507 		tlen += sizeof (struct tcpiphdr);
508 		ip->ip_len = tlen;
509 		ip->ip_ttl = ip_defttl;
510 		if (path_mtu_discovery)
511 			ip->ip_off |= IP_DF;
512 	}
513 	m->m_len = tlen;
514 	m->m_pkthdr.len = tlen;
515 	m->m_pkthdr.rcvif = NULL;
516 #ifdef MAC
517 	if (inp != NULL) {
518 		/*
519 		 * Packet is associated with a socket, so allow the
520 		 * label of the response to reflect the socket label.
521 		 */
522 		INP_LOCK_ASSERT(inp);
523 		mac_create_mbuf_from_inpcb(inp, m);
524 	} else {
525 		/*
526 		 * Packet is not associated with a socket, so possibly
527 		 * update the label in place.
528 		 */
529 		mac_reflect_mbuf_tcp(m);
530 	}
531 #endif
532 	nth->th_seq = htonl(seq);
533 	nth->th_ack = htonl(ack);
534 	nth->th_x2 = 0;
535 	nth->th_off = sizeof (struct tcphdr) >> 2;
536 	nth->th_flags = flags;
537 	if (tp != NULL)
538 		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
539 	else
540 		nth->th_win = htons((u_short)win);
541 	nth->th_urp = 0;
542 #ifdef INET6
543 	if (isipv6) {
544 		nth->th_sum = 0;
545 		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
546 					sizeof(struct ip6_hdr),
547 					tlen - sizeof(struct ip6_hdr));
548 		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
549 		    NULL, NULL);
550 	} else
551 #endif /* INET6 */
552 	{
553 		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
554 		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
555 		m->m_pkthdr.csum_flags = CSUM_TCP;
556 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
557 	}
558 #ifdef TCPDEBUG
559 	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
560 		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
561 #endif
562 #ifdef INET6
563 	if (isipv6)
564 		(void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
565 	else
566 #endif /* INET6 */
567 	(void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
568 }
569 
570 /*
571  * Create a new TCP control block, making an
572  * empty reassembly queue and hooking it to the argument
573  * protocol control block.  The `inp' parameter must have
574  * come from the zone allocator set up in tcp_init().
575  */
576 struct tcpcb *
577 tcp_newtcpcb(inp)
578 	struct inpcb *inp;
579 {
580 	struct tcpcb_mem *tm;
581 	struct tcpcb *tp;
582 #ifdef INET6
583 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
584 #endif /* INET6 */
585 	int callout_flag;
586 
587 	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
588 	if (tm == NULL)
589 		return (NULL);
590 	tp = &tm->tcb;
591 	/*	LIST_INIT(&tp->t_segq); */	/* XXX covered by M_ZERO */
592 	tp->t_maxseg = tp->t_maxopd =
593 #ifdef INET6
594 		isipv6 ? tcp_v6mssdflt :
595 #endif /* INET6 */
596 		tcp_mssdflt;
597 
598 	/* Set up our timeouts. */
599 	callout_flag = debug_mpsafenet ? CALLOUT_MPSAFE : 0;
600 	callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, callout_flag);
601 	callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, callout_flag);
602 	callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, callout_flag);
603 	callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, callout_flag);
604 	callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, callout_flag);
605 
606 	if (tcp_do_rfc1323)
607 		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
608 	tp->sack_enable = tcp_do_sack;
609 	tp->t_inpcb = inp;	/* XXX */
610 	/*
611 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
612 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
613 	 * reasonable initial retransmit time.
614 	 */
615 	tp->t_srtt = TCPTV_SRTTBASE;
616 	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
617 	tp->t_rttmin = tcp_rexmit_min;
618 	tp->t_rxtcur = TCPTV_RTOBASE;
619 	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
620 	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
621 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
622 	tp->t_rcvtime = ticks;
623 	tp->t_bw_rtttime = ticks;
624 	/*
625 	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
626 	 * because the socket may be bound to an IPv6 wildcard address,
627 	 * which may match an IPv4-mapped IPv6 address.
628 	 */
629 	inp->inp_ip_ttl = ip_defttl;
630 	inp->inp_ppcb = (caddr_t)tp;
631 	return (tp);		/* XXX */
632 }
633 
634 /*
635  * Drop a TCP connection, reporting
636  * the specified error.  If connection is synchronized,
637  * then send a RST to peer.
638  */
639 struct tcpcb *
640 tcp_drop(tp, errno)
641 	register struct tcpcb *tp;
642 	int errno;
643 {
644 	struct socket *so = tp->t_inpcb->inp_socket;
645 
646 	INP_LOCK_ASSERT(tp->t_inpcb);
647 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
648 		tp->t_state = TCPS_CLOSED;
649 		(void) tcp_output(tp);
650 		tcpstat.tcps_drops++;
651 	} else
652 		tcpstat.tcps_conndrops++;
653 	if (errno == ETIMEDOUT && tp->t_softerror)
654 		errno = tp->t_softerror;
655 	so->so_error = errno;
656 	return (tcp_close(tp));
657 }
658 
659 static void
660 tcp_discardcb(tp)
661 	struct tcpcb *tp;
662 {
663 	struct tseg_qent *q;
664 	struct inpcb *inp = tp->t_inpcb;
665 	struct socket *so = inp->inp_socket;
666 #ifdef INET6
667 	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
668 #endif /* INET6 */
669 
670 	INP_LOCK_ASSERT(inp);
671 
672 	/*
673 	 * Make sure that all of our timers are stopped before we
674 	 * delete the PCB.
675 	 */
676 	callout_stop(tp->tt_rexmt);
677 	callout_stop(tp->tt_persist);
678 	callout_stop(tp->tt_keep);
679 	callout_stop(tp->tt_2msl);
680 	callout_stop(tp->tt_delack);
681 
682 	/*
683 	 * If we got enough samples through the srtt filter,
684 	 * save the rtt and rttvar in the routing entry.
685 	 * 'Enough' is arbitrarily defined as 4 rtt samples.
686 	 * 4 samples is enough for the srtt filter to converge
687 	 * to within enough % of the correct value; fewer samples
688 	 * close enough to the correct value; with fewer samples
689 	 * we could save a bogus rtt. The danger is not high
690 	 * XXX: Works very well but needs some more statistics!
691 	 */
692 	if (tp->t_rttupdated >= 4) {
693 		struct hc_metrics_lite metrics;
694 		u_long ssthresh;
695 
696 		bzero(&metrics, sizeof(metrics));
697 		/*
698 		 * Always update the ssthresh when the conditions below are
699 		 * satisfied. This gives us a better starting value for
700 		 * congestion avoidance on new connections.
701 		 * ssthresh is only set if packet loss occurred on a session.
702 		 */
703 		ssthresh = tp->snd_ssthresh;
704 		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
705 			/*
706 			 * convert the limit from user data bytes to
707 			 * packets then to packet data bytes.
708 			 */
709 			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
710 			if (ssthresh < 2)
711 				ssthresh = 2;
712 			ssthresh *= (u_long)(tp->t_maxseg +
713 #ifdef INET6
714 				      (isipv6 ? sizeof (struct ip6_hdr) +
715 					       sizeof (struct tcphdr) :
716 #endif
717 				       sizeof (struct tcpiphdr)
718 #ifdef INET6
719 				       )
720 #endif
721 				      );
722 		} else
723 			ssthresh = 0;
724 		metrics.rmx_ssthresh = ssthresh;
725 
726 		metrics.rmx_rtt = tp->t_srtt;
727 		metrics.rmx_rttvar = tp->t_rttvar;
728 		/* XXX: This wraps if the pipe is more than 4 Gbit per second */
729 		metrics.rmx_bandwidth = tp->snd_bandwidth;
730 		metrics.rmx_cwnd = tp->snd_cwnd;
731 		metrics.rmx_sendpipe = 0;
732 		metrics.rmx_recvpipe = 0;
733 
734 		tcp_hc_update(&inp->inp_inc, &metrics);
735 	}
736 
737 	/* free the reassembly queue, if any */
738 	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
739 		LIST_REMOVE(q, tqe_q);
740 		m_freem(q->tqe_m);
741 		uma_zfree(tcp_reass_zone, q);
742 		tp->t_segqlen--;
743 		tcp_reass_qsize--;
744 	}
745 	tcp_free_sackholes(tp);
746 	inp->inp_ppcb = NULL;
747 	tp->t_inpcb = NULL;
748 	uma_zfree(tcpcb_zone, tp);
749 	soisdisconnected(so);
750 }
751 
752 /*
753  * Close a TCP control block:
754  *    discard all space held by the tcp
755  *    discard internet protocol block
756  *    wake up any sleepers
757  */
758 struct tcpcb *
759 tcp_close(tp)
760 	struct tcpcb *tp;
761 {
762 	struct inpcb *inp = tp->t_inpcb;
763 #ifdef INET6
764 	struct socket *so = inp->inp_socket;
765 #endif
766 
767 	INP_LOCK_ASSERT(inp);
768 
769 	tcp_discardcb(tp);
770 #ifdef INET6
771 	if (INP_CHECK_SOCKAF(so, AF_INET6))
772 		in6_pcbdetach(inp);
773 	else
774 #endif
775 		in_pcbdetach(inp);
776 	tcpstat.tcps_closed++;
777 	return (NULL);
778 }
779 
780 void
781 tcp_drain()
782 {
783 	if (do_tcpdrain)
784 	{
785 		struct inpcb *inpb;
786 		struct tcpcb *tcpb;
787 		struct tseg_qent *te;
788 
789 	/*
790 	 * Walk the tcpbs, if any exist, and flush the reassembly queue,
791 	 * if there is one...
792 	 * XXX: The "Net/3" implementation doesn't imply that the TCP
793 	 *      reassembly queue should be flushed, but in a situation
794 	 *	where we're really low on mbufs, this is potentially
795 	 *	useful.
796 	 */
797 		INP_INFO_RLOCK(&tcbinfo);
798 		LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
799 			if (inpb->inp_vflag & INP_TIMEWAIT)
800 				continue;
801 			INP_LOCK(inpb);
802 			if ((tcpb = intotcpcb(inpb)) != NULL) {
803 				while ((te = LIST_FIRST(&tcpb->t_segq))
804 			            != NULL) {
805 					LIST_REMOVE(te, tqe_q);
806 					m_freem(te->tqe_m);
807 					uma_zfree(tcp_reass_zone, te);
808 					tcpb->t_segqlen--;
809 					tcp_reass_qsize--;
810 				}
811 			}
812 			INP_UNLOCK(inpb);
813 		}
814 		INP_INFO_RUNLOCK(&tcbinfo);
815 	}
816 }
817 
818 /*
819  * Notify a tcp user of an asynchronous error;
820  * store error as soft error, but wake up user
821  * (for now, won't do anything until can select for soft error).
822  *
823  * Do not wake up user since there currently is no mechanism for
824  * reporting soft errors (yet - a kqueue filter may be added).
825  */
826 static struct inpcb *
827 tcp_notify(inp, error)
828 	struct inpcb *inp;
829 	int error;
830 {
831 	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
832 
833 	INP_LOCK_ASSERT(inp);
834 
835 	/*
836 	 * Ignore some errors if we are hooked up.
837 	 * If connection hasn't completed, has retransmitted several times,
838 	 * and receives a second error, give up now.  This is better
839 	 * than waiting a long time to establish a connection that
840 	 * can never complete.
841 	 */
842 	if (tp->t_state == TCPS_ESTABLISHED &&
843 	    (error == EHOSTUNREACH || error == ENETUNREACH ||
844 	     error == EHOSTDOWN)) {
845 		return (inp);
846 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
847 	    tp->t_softerror) {
848 		tcp_drop(tp, error);
849 		return (struct inpcb *)0;
850 	} else {
851 		tp->t_softerror = error;
852 		return (inp);
853 	}
854 #if 0
855 	wakeup( &so->so_timeo);
856 	sorwakeup(so);
857 	sowwakeup(so);
858 #endif
859 }
860 
861 static int
862 tcp_pcblist(SYSCTL_HANDLER_ARGS)
863 {
864 	int error, i, n, s;
865 	struct inpcb *inp, **inp_list;
866 	inp_gen_t gencnt;
867 	struct xinpgen xig;
868 
869 	/*
870 	 * The process of preparing the TCB list is too time-consuming and
871 	 * resource-intensive to repeat twice on every request.
872 	 */
873 	if (req->oldptr == NULL) {
874 		n = tcbinfo.ipi_count;
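		/* Overestimate by n/8 (~12%) to allow for PCBs added while the caller allocates. */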
875 		req->oldidx = 2 * (sizeof xig)
876 			+ (n + n/8) * sizeof(struct xtcpcb);
877 		return (0);
878 	}
879 
880 	if (req->newptr != NULL)
881 		return (EPERM);
882 
883 	/*
884 	 * OK, now we're committed to doing something.
885 	 */
886 	s = splnet();
887 	INP_INFO_RLOCK(&tcbinfo);
888 	gencnt = tcbinfo.ipi_gencnt;
889 	n = tcbinfo.ipi_count;
890 	INP_INFO_RUNLOCK(&tcbinfo);
891 	splx(s);
892 
893 	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
894 		+ n * sizeof(struct xtcpcb));
895 	if (error != 0)
896 		return (error);
897 
898 	xig.xig_len = sizeof xig;
899 	xig.xig_count = n;
900 	xig.xig_gen = gencnt;
901 	xig.xig_sogen = so_gencnt;
902 	error = SYSCTL_OUT(req, &xig, sizeof xig);
903 	if (error)
904 		return (error);
905 
906 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
907 	if (inp_list == NULL)
908 		return (ENOMEM);
909 
910 	s = splnet();
911 	INP_INFO_RLOCK(&tcbinfo);
912 	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n;
913 	     inp = LIST_NEXT(inp, inp_list)) {
914 		INP_LOCK(inp);
915 		if (inp->inp_gencnt <= gencnt) {
916 			/*
917 			 * XXX: This use of cr_cansee(), introduced with
918 			 * TCP state changes, is not quite right, but for
919 			 * now, better than nothing.
920 			 */
921 			if (inp->inp_vflag & INP_TIMEWAIT)
922 				error = cr_cansee(req->td->td_ucred,
923 				    intotw(inp)->tw_cred);
924 			else
925 				error = cr_canseesocket(req->td->td_ucred,
926 				    inp->inp_socket);
927 			if (error == 0)
928 				inp_list[i++] = inp;
929 		}
930 		INP_UNLOCK(inp);
931 	}
932 	INP_INFO_RUNLOCK(&tcbinfo);
933 	splx(s);
934 	n = i;
935 
936 	error = 0;
937 	for (i = 0; i < n; i++) {
938 		inp = inp_list[i];
939 		if (inp->inp_gencnt <= gencnt) {
940 			struct xtcpcb xt;
941 			caddr_t inp_ppcb;
942 			xt.xt_len = sizeof xt;
943 			/* XXX should avoid extra copy */
944 			bcopy(inp, &xt.xt_inp, sizeof *inp);
945 			inp_ppcb = inp->inp_ppcb;
946 			if (inp_ppcb == NULL)
947 				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
948 			else if (inp->inp_vflag & INP_TIMEWAIT) {
949 				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
950 				xt.xt_tp.t_state = TCPS_TIME_WAIT;
951 			} else
952 				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
953 			if (inp->inp_socket != NULL)
954 				sotoxsocket(inp->inp_socket, &xt.xt_socket);
955 			else {
956 				bzero(&xt.xt_socket, sizeof xt.xt_socket);
957 				xt.xt_socket.xso_protocol = IPPROTO_TCP;
958 			}
959 			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
960 			error = SYSCTL_OUT(req, &xt, sizeof xt);
961 		}
962 	}
963 	if (!error) {
964 		/*
965 		 * Give the user an updated idea of our state.
966 		 * If the generation differs from what we told
967 		 * her before, she knows that something happened
968 		 * while we were processing this request, and it
969 		 * might be necessary to retry.
970 		 */
971 		s = splnet();
972 		INP_INFO_RLOCK(&tcbinfo);
973 		xig.xig_gen = tcbinfo.ipi_gencnt;
974 		xig.xig_sogen = so_gencnt;
975 		xig.xig_count = tcbinfo.ipi_count;
976 		INP_INFO_RUNLOCK(&tcbinfo);
977 		splx(s);
978 		error = SYSCTL_OUT(req, &xig, sizeof xig);
979 	}
980 	free(inp_list, M_TEMP);
981 	return (error);
982 }
983 
984 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
985 	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
986 
987 static int
988 tcp_getcred(SYSCTL_HANDLER_ARGS)
989 {
990 	struct xucred xuc;
991 	struct sockaddr_in addrs[2];
992 	struct inpcb *inp;
993 	int error, s;
994 
995 	error = suser_cred(req->td->td_ucred, SUSER_ALLOWJAIL);
996 	if (error)
997 		return (error);
998 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
999 	if (error)
1000 		return (error);
1001 	s = splnet();
1002 	INP_INFO_RLOCK(&tcbinfo);
1003 	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
1004 	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
1005 	if (inp == NULL) {
1006 		error = ENOENT;
1007 		goto outunlocked;
1008 	}
1009 	INP_LOCK(inp);
1010 	if (inp->inp_socket == NULL) {
1011 		error = ENOENT;
1012 		goto out;
1013 	}
1014 	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
1015 	if (error)
1016 		goto out;
1017 	cru2x(inp->inp_socket->so_cred, &xuc);
1018 out:
1019 	INP_UNLOCK(inp);
1020 outunlocked:
1021 	INP_INFO_RUNLOCK(&tcbinfo);
1022 	splx(s);
1023 	if (error == 0)
1024 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
1025 	return (error);
1026 }
1027 
1028 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
1029     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
1030     tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
1031 
1032 #ifdef INET6
1033 static int
1034 tcp6_getcred(SYSCTL_HANDLER_ARGS)
1035 {
1036 	struct xucred xuc;
1037 	struct sockaddr_in6 addrs[2];
1038 	struct inpcb *inp;
1039 	int error, s, mapped = 0;
1040 
1041 	error = suser_cred(req->td->td_ucred, SUSER_ALLOWJAIL);
1042 	if (error)
1043 		return (error);
1044 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
1045 	if (error)
1046 		return (error);
1047 	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
1048 		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
1049 			mapped = 1;
1050 		else
1051 			return (EINVAL);
1052 	}
1053 	s = splnet();
1054 	INP_INFO_RLOCK(&tcbinfo);
1055 	if (mapped == 1)
1056 		inp = in_pcblookup_hash(&tcbinfo,
1057 			*(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
1058 			addrs[1].sin6_port,
1059 			*(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
1060 			addrs[0].sin6_port,
1061 			0, NULL);
1062 	else
1063 		inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
1064 				 addrs[1].sin6_port,
1065 				 &addrs[0].sin6_addr, addrs[0].sin6_port,
1066 				 0, NULL);
1067 	if (inp == NULL) {
1068 		error = ENOENT;
1069 		goto outunlocked;
1070 	}
1071 	INP_LOCK(inp);
1072 	if (inp->inp_socket == NULL) {
1073 		error = ENOENT;
1074 		goto out;
1075 	}
1076 	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
1077 	if (error)
1078 		goto out;
1079 	cru2x(inp->inp_socket->so_cred, &xuc);
1080 out:
1081 	INP_UNLOCK(inp);
1082 outunlocked:
1083 	INP_INFO_RUNLOCK(&tcbinfo);
1084 	splx(s);
1085 	if (error == 0)
1086 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
1087 	return (error);
1088 }
1089 
1090 SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
1091     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
1092     tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
1093 #endif
1094 
1095 
1096 void
1097 tcp_ctlinput(cmd, sa, vip)
1098 	int cmd;
1099 	struct sockaddr *sa;
1100 	void *vip;
1101 {
1102 	struct ip *ip = vip;
1103 	struct tcphdr *th;
1104 	struct in_addr faddr;
1105 	struct inpcb *inp;
1106 	struct tcpcb *tp;
1107 	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1108 	tcp_seq icmp_seq;
1109 	int s;
1110 
1111 	faddr = ((struct sockaddr_in *)sa)->sin_addr;
1112 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
1113 		return;
1114 
1115 	if (cmd == PRC_QUENCH)
1116 		notify = tcp_quench;
1117 	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
1118 		cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
1119 		notify = tcp_drop_syn_sent;
1120 	else if (cmd == PRC_MSGSIZE)
1121 		notify = tcp_mtudisc;
1122 	/*
1123 	 * Redirects don't need to be handled up here.
1124 	 */
1125 	else if (PRC_IS_REDIRECT(cmd))
1126 		return;
1127 	/*
1128 	 * Hostdead is ugly because it goes linearly through all PCBs.
1129 	 * XXX: We never get this from ICMP; otherwise it would make an
1130 	 * excellent DoS attack on machines with many connections.
1131 	 */
1132 	else if (cmd == PRC_HOSTDEAD)
1133 		ip = NULL;
1134 	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
1135 		return;
1136 	if (ip != NULL) {
1137 		s = splnet();
1138 		th = (struct tcphdr *)((caddr_t)ip
1139 				       + (ip->ip_hl << 2));
1140 		INP_INFO_WLOCK(&tcbinfo);
1141 		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
1142 		    ip->ip_src, th->th_sport, 0, NULL);
1143 		if (inp != NULL)  {
1144 			INP_LOCK(inp);
1145 			if (inp->inp_socket != NULL) {
1146 				icmp_seq = htonl(th->th_seq);
1147 				tp = intotcpcb(inp);
1148 				if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
1149 					SEQ_LT(icmp_seq, tp->snd_max))
1150 					inp = (*notify)(inp, inetctlerrmap[cmd]);
1151 			}
1152 			if (inp != NULL)
1153 				INP_UNLOCK(inp);
1154 		} else {
1155 			struct in_conninfo inc;
1156 
1157 			inc.inc_fport = th->th_dport;
1158 			inc.inc_lport = th->th_sport;
1159 			inc.inc_faddr = faddr;
1160 			inc.inc_laddr = ip->ip_src;
1161 #ifdef INET6
1162 			inc.inc_isipv6 = 0;
1163 #endif
1164 			syncache_unreach(&inc, th);
1165 		}
1166 		INP_INFO_WUNLOCK(&tcbinfo);
1167 		splx(s);
1168 	} else
1169 		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
1170 }
1171 
1172 #ifdef INET6
1173 void
1174 tcp6_ctlinput(cmd, sa, d)
1175 	int cmd;
1176 	struct sockaddr *sa;
1177 	void *d;
1178 {
1179 	struct tcphdr th;
1180 	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
1181 	struct ip6_hdr *ip6;
1182 	struct mbuf *m;
1183 	struct ip6ctlparam *ip6cp = NULL;
1184 	const struct sockaddr_in6 *sa6_src = NULL;
1185 	int off;
1186 	struct tcp_portonly {
1187 		u_int16_t th_sport;
1188 		u_int16_t th_dport;
1189 	} *thp;
1190 
1191 	if (sa->sa_family != AF_INET6 ||
1192 	    sa->sa_len != sizeof(struct sockaddr_in6))
1193 		return;
1194 
1195 	if (cmd == PRC_QUENCH)
1196 		notify = tcp_quench;
1197 	else if (cmd == PRC_MSGSIZE)
1198 		notify = tcp_mtudisc;
1199 	else if (!PRC_IS_REDIRECT(cmd) &&
1200 		 ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
1201 		return;
1202 
1203 	/* if the parameter is from icmp6, decode it. */
1204 	if (d != NULL) {
1205 		ip6cp = (struct ip6ctlparam *)d;
1206 		m = ip6cp->ip6c_m;
1207 		ip6 = ip6cp->ip6c_ip6;
1208 		off = ip6cp->ip6c_off;
1209 		sa6_src = ip6cp->ip6c_src;
1210 	} else {
1211 		m = NULL;
1212 		ip6 = NULL;
1213 		off = 0;	/* fool gcc */
1214 		sa6_src = &sa6_any;
1215 	}
1216 
1217 	if (ip6 != NULL) {
1218 		struct in_conninfo inc;
1219 		/*
1220 		 * XXX: We assume that when IPV6 is non NULL,
1221 		 * M and OFF are valid.
1222 		 */
1223 
1224 		/* check if we can safely examine src and dst ports */
1225 		if (m->m_pkthdr.len < off + sizeof(*thp))
1226 			return;
1227 
1228 		bzero(&th, sizeof(th));
1229 		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
1230 
1231 		in6_pcbnotify(&tcbinfo, sa, th.th_dport,
1232 		    (struct sockaddr *)ip6cp->ip6c_src,
1233 		    th.th_sport, cmd, NULL, notify);
1234 
1235 		inc.inc_fport = th.th_dport;
1236 		inc.inc_lport = th.th_sport;
1237 		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
1238 		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
1239 		inc.inc_isipv6 = 1;
1240 		INP_INFO_WLOCK(&tcbinfo);
1241 		syncache_unreach(&inc, &th);
1242 		INP_INFO_WUNLOCK(&tcbinfo);
1243 	} else
1244 		in6_pcbnotify(&tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
1245 			      0, cmd, NULL, notify);
1246 }
1247 #endif /* INET6 */
1248 
1249 
1250 /*
1251  * Following is where TCP initial sequence number generation occurs.
1252  *
1253  * There are two places where we must use initial sequence numbers:
1254  * 1.  In SYN-ACK packets.
1255  * 2.  In SYN packets.
1256  *
1257  * All ISNs for SYN-ACK packets are generated by the syncache.  See
1258  * tcp_syncache.c for details.
1259  *
1260  * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
1261  * depends on this property.  In addition, these ISNs should be
1262  * unguessable so as to prevent connection hijacking.  To satisfy
1263  * the requirements of this situation, the algorithm outlined in
1264  * RFC 1948 is used, with only small modifications.
1265  *
1266  * Implementation details:
1267  *
1268  * Time is based on the system timer, and is corrected so that it
1269  * increases by one megabyte per second.  This allows for proper
1270  * recycling on high speed LANs while still leaving over an hour
1271  * before rollover.
1272  *
1273  * As reading the *exact* system time is too expensive to be done
1274  * whenever setting up a TCP connection, we increment the time
1275  * offset in two ways.  First, a small random positive increment
1276  * is added to isn_offset for each connection that is set up.
1277  * Second, the function tcp_isn_tick fires once per clock tick
1278  * and increments isn_offset as necessary so that sequence numbers
1279  * are incremented at approximately ISN_BYTES_PER_SECOND.  The
1280  * random positive increments serve only to ensure that the same
1281  * exact sequence number is never sent out twice (as could otherwise
1282  * happen when a port is recycled in less than the system tick
1283  * interval.)
1284  *
1285  * net.inet.tcp.isn_reseed_interval controls the number of seconds
1286  * between seeding of isn_secret.  This is normally set to zero,
1287  * as reseeding should not be necessary.
1288  *
1289  * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
1290  * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock.  In
1291  * general, this means holding an exclusive (write) lock.
1292  */
1293 
1294 #define ISN_BYTES_PER_SECOND 1048576
1295 #define ISN_STATIC_INCREMENT 4096
1296 #define ISN_RANDOM_INCREMENT (4096 - 1)
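/*
 * Illustrative numbers, assuming hz = 1000: tcp_isn_tick() advances the
 * projected offset by ISN_BYTES_PER_SECOND / hz = 1048576 / 1000 ~= 1048
 * per tick, and every new connection adds 4096 plus a random value in
 * the range [0, 4095] on top of that.
 */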
1297 
1298 static u_char isn_secret[32];
1299 static int isn_last_reseed;
1300 static u_int32_t isn_offset, isn_offset_old;
1301 static MD5_CTX isn_ctx;
1302 
1303 tcp_seq
1304 tcp_new_isn(tp)
1305 	struct tcpcb *tp;
1306 {
1307 	u_int32_t md5_buffer[4];
1308 	tcp_seq new_isn;
1309 
1310 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
1311 	INP_LOCK_ASSERT(tp->t_inpcb);
1312 
1313 	/* Seed if this is the first use, reseed if requested. */
1314 	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
1315 	     (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
1316 		< (u_int)ticks))) {
1317 		read_random(&isn_secret, sizeof(isn_secret));
1318 		isn_last_reseed = ticks;
1319 	}
1320 
1321 	/* Compute the md5 hash and return the ISN. */
1322 	MD5Init(&isn_ctx);
1323 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
1324 	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
1325 #ifdef INET6
1326 	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
1327 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
1328 			  sizeof(struct in6_addr));
1329 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
1330 			  sizeof(struct in6_addr));
1331 	} else
1332 #endif
1333 	{
1334 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
1335 			  sizeof(struct in_addr));
1336 		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
1337 			  sizeof(struct in_addr));
1338 	}
1339 	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
1340 	MD5Final((u_char *) &md5_buffer, &isn_ctx);
1341 	new_isn = (tcp_seq) md5_buffer[0];
1342 	isn_offset += ISN_STATIC_INCREMENT +
1343 		(arc4random() & ISN_RANDOM_INCREMENT);
1344 	new_isn += isn_offset;
1345 	return (new_isn);
1346 }
1347 
1348 /*
1349  * Increment the offset to the next ISN_BYTES_PER_SECOND / hz boundary
1350  * to keep time flowing at a relatively constant rate.  If the random
1351  * increments have already pushed us past the projected offset, do nothing.
1352  */
1353 static void
1354 tcp_isn_tick(xtp)
1355 	void *xtp;
1356 {
1357 	u_int32_t projected_offset;
1358 
1359 	INP_INFO_WLOCK(&tcbinfo);
1360 	projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / hz;
1361 
1362 	if (projected_offset > isn_offset)
1363 		isn_offset = projected_offset;
1364 
1365 	isn_offset_old = isn_offset;
1366 	callout_reset(&isn_callout, 1, tcp_isn_tick, NULL);
1367 	INP_INFO_WUNLOCK(&tcbinfo);
1368 }
1369 
1370 /*
1371  * When a source quench is received, close congestion window
1372  * to one segment.  We will gradually open it again as we proceed.
1373  */
1374 struct inpcb *
1375 tcp_quench(inp, errno)
1376 	struct inpcb *inp;
1377 	int errno;
1378 {
1379 	struct tcpcb *tp = intotcpcb(inp);
1380 
1381 	INP_LOCK_ASSERT(inp);
1382 	if (tp != NULL)
1383 		tp->snd_cwnd = tp->t_maxseg;
1384 	return (inp);
1385 }
1386 
1387 /*
1388  * When a specific ICMP unreachable message is received and the
1389  * connection state is SYN-SENT, drop the connection.  This behavior
1390  * is controlled by the icmp_may_rst sysctl.
1391  */
1392 struct inpcb *
1393 tcp_drop_syn_sent(inp, errno)
1394 	struct inpcb *inp;
1395 	int errno;
1396 {
1397 	struct tcpcb *tp = intotcpcb(inp);
1398 
1399 	INP_LOCK_ASSERT(inp);
1400 	if (tp != NULL && tp->t_state == TCPS_SYN_SENT) {
1401 		tcp_drop(tp, errno);
1402 		return (NULL);
1403 	}
1404 	return (inp);
1405 }
1406 
1407 /*
1408  * When `need fragmentation' ICMP is received, update our idea of the MSS
1409  * based on the new value in the route.  Also nudge TCP to send something,
1410  * since we know the packet we just sent was dropped.
1411  * This duplicates some code in the tcp_mss() function in tcp_input.c.
1412  */
1413 struct inpcb *
1414 tcp_mtudisc(inp, errno)
1415 	struct inpcb *inp;
1416 	int errno;
1417 {
1418 	struct tcpcb *tp = intotcpcb(inp);
1419 	struct socket *so = inp->inp_socket;
1420 	u_int maxmtu;
1421 	u_int romtu;
1422 	int mss;
1423 #ifdef INET6
1424 	int isipv6;
1425 #endif /* INET6 */
1426 
1427 	INP_LOCK_ASSERT(inp);
1428 	if (tp != NULL) {
1429 #ifdef INET6
1430 		isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
1431 #endif
1432 		maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
1433 		romtu =
1434 #ifdef INET6
1435 		    isipv6 ? tcp_maxmtu6(&inp->inp_inc) :
1436 #endif /* INET6 */
1437 		    tcp_maxmtu(&inp->inp_inc);
1438 		if (!maxmtu)
1439 			maxmtu = romtu;
1440 		else
1441 			maxmtu = min(maxmtu, romtu);
1442 		if (!maxmtu) {
1443 			tp->t_maxopd = tp->t_maxseg =
1444 #ifdef INET6
1445 				isipv6 ? tcp_v6mssdflt :
1446 #endif /* INET6 */
1447 				tcp_mssdflt;
1448 			return (inp);
1449 		}
1450 		mss = maxmtu -
1451 #ifdef INET6
1452 			(isipv6 ?
1453 			 sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
1454 #endif /* INET6 */
1455 			 sizeof(struct tcpiphdr)
1456 #ifdef INET6
1457 			 )
1458 #endif /* INET6 */
1459 			;
1460 
1461 		/*
1462 		 * XXX - The above conditional probably violates the TCP
1463 		 * spec.  The problem is that, since we don't know the
1464 		 * other end's MSS, we are supposed to use a conservative
1465 		 * default.  But, if we do that, then MTU discovery will
1466 		 * never actually take place, because the conservative
1467 		 * default is much less than the MTUs typically seen
1468 		 * on the Internet today.  For the moment, we'll sweep
1469 		 * this under the carpet.
1470 		 *
1471 		 * The conservative default might not actually be a problem
1472 		 * if the only case this occurs is when sending an initial
1473 		 * SYN with options and data to a host we've never talked
1474 		 * to before.  Then, they will reply with an MSS value which
1475 		 * will get recorded and the new parameters should get
1476 		 * recomputed.  For Further Study.
1477 		 */
1478 		if (tp->t_maxopd <= mss)
1479 			return (inp);
1480 		tp->t_maxopd = mss;
1481 
1482 		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
1483 		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
1484 			mss -= TCPOLEN_TSTAMP_APPA;
1485 #if	(MCLBYTES & (MCLBYTES - 1)) == 0
1486 		if (mss > MCLBYTES)
1487 			mss &= ~(MCLBYTES-1);
1488 #else
1489 		if (mss > MCLBYTES)
1490 			mss = mss / MCLBYTES * MCLBYTES;
1491 #endif
1492 		if (so->so_snd.sb_hiwat < mss)
1493 			mss = so->so_snd.sb_hiwat;
1494 
1495 		tp->t_maxseg = mss;
1496 
1497 		tcpstat.tcps_mturesent++;
1498 		tp->t_rtttime = 0;
1499 		tp->snd_nxt = tp->snd_una;
1500 		tcp_output(tp);
1501 	}
1502 	return (inp);
1503 }
1504 
1505 /*
1506  * Look-up the routing entry to the peer of this inpcb.  If no route
1507  * is found and it cannot be allocated, then return NULL.  This routine
1508  * is called by TCP routines that access the rmx structure and by tcp_mss
1509  * to get the interface MTU.
1510  */
1511 u_long
1512 tcp_maxmtu(inc)
1513 	struct in_conninfo *inc;
1514 {
1515 	struct route sro;
1516 	struct sockaddr_in *dst;
1517 	struct ifnet *ifp;
1518 	u_long maxmtu = 0;
1519 
1520 	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
1521 
1522 	bzero(&sro, sizeof(sro));
1523 	if (inc->inc_faddr.s_addr != INADDR_ANY) {
1524 	        dst = (struct sockaddr_in *)&sro.ro_dst;
1525 		dst->sin_family = AF_INET;
1526 		dst->sin_len = sizeof(*dst);
1527 		dst->sin_addr = inc->inc_faddr;
1528 		rtalloc_ign(&sro, RTF_CLONING);
1529 	}
1530 	if (sro.ro_rt != NULL) {
1531 		ifp = sro.ro_rt->rt_ifp;
1532 		if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
1533 			maxmtu = ifp->if_mtu;
1534 		else
1535 			maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
1536 		RTFREE(sro.ro_rt);
1537 	}
1538 	return (maxmtu);
1539 }
1540 
1541 #ifdef INET6
1542 u_long
1543 tcp_maxmtu6(inc)
1544 	struct in_conninfo *inc;
1545 {
1546 	struct route_in6 sro6;
1547 	struct ifnet *ifp;
1548 	u_long maxmtu = 0;
1549 
1550 	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
1551 
1552 	bzero(&sro6, sizeof(sro6));
1553 	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
1554 		sro6.ro_dst.sin6_family = AF_INET6;
1555 		sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
1556 		sro6.ro_dst.sin6_addr = inc->inc6_faddr;
1557 		rtalloc_ign((struct route *)&sro6, RTF_CLONING);
1558 	}
1559 	if (sro6.ro_rt != NULL) {
1560 		ifp = sro6.ro_rt->rt_ifp;
1561 		if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
1562 			maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
1563 		else
1564 			maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
1565 				     IN6_LINKMTU(sro6.ro_rt->rt_ifp));
1566 		RTFREE(sro6.ro_rt);
1567 	}
1568 
1569 	return (maxmtu);
1570 }
1571 #endif /* INET6 */
1572 
1573 #ifdef IPSEC
1574 /* compute ESP/AH header size for TCP, including outer IP header. */
1575 size_t
1576 ipsec_hdrsiz_tcp(tp)
1577 	struct tcpcb *tp;
1578 {
1579 	struct inpcb *inp;
1580 	struct mbuf *m;
1581 	size_t hdrsiz;
1582 	struct ip *ip;
1583 #ifdef INET6
1584 	struct ip6_hdr *ip6;
1585 #endif
1586 	struct tcphdr *th;
1587 
1588 	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
1589 		return (0);
1590 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1591 	if (!m)
1592 		return (0);
1593 
1594 #ifdef INET6
1595 	if ((inp->inp_vflag & INP_IPV6) != 0) {
1596 		ip6 = mtod(m, struct ip6_hdr *);
1597 		th = (struct tcphdr *)(ip6 + 1);
1598 		m->m_pkthdr.len = m->m_len =
1599 			sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1600 		tcpip_fillheaders(inp, ip6, th);
1601 		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1602 	} else
1603 #endif /* INET6 */
1604 	{
1605 		ip = mtod(m, struct ip *);
1606 		th = (struct tcphdr *)(ip + 1);
1607 		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
1608 		tcpip_fillheaders(inp, ip, th);
1609 		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
1610 	}
1611 
1612 	m_free(m);
1613 	return (hdrsiz);
1614 }
1615 #endif /*IPSEC*/
1616 
1617 /*
1618  * Move a TCP connection into TIME_WAIT state.
1619  *    tcbinfo is locked.
1620  *    inp is locked, and is unlocked before returning.
1621  */
1622 void
1623 tcp_twstart(tp)
1624 	struct tcpcb *tp;
1625 {
1626 	struct tcptw *tw;
1627 	struct inpcb *inp;
1628 	int tw_time, acknow;
1629 	struct socket *so;
1630 
1631 	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_timer_2msl_reset(). */
1632 	INP_LOCK_ASSERT(tp->t_inpcb);
1633 
1634 	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
1635 	if (tw == NULL) {
1636 		tw = tcp_timer_2msl_tw(1);
1637 		if (tw == NULL) {
1638 			tcp_close(tp);
1639 			return;
1640 		}
1641 	}
1642 	inp = tp->t_inpcb;
1643 	tw->tw_inpcb = inp;
1644 
1645 	/*
1646 	 * Recover last window size sent.
1647 	 */
1648 	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
1649 
1650 	/*
1651 	 * Set t_recent if timestamps are used on the connection.
1652 	 */
1653 	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
1654 	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
1655 		tw->t_recent = tp->ts_recent;
1656 	else
1657 		tw->t_recent = 0;
1658 
1659 	tw->snd_nxt = tp->snd_nxt;
1660 	tw->rcv_nxt = tp->rcv_nxt;
1661 	tw->iss     = tp->iss;
1662 	tw->irs     = tp->irs;
1663 	tw->t_starttime = tp->t_starttime;
1664 	tw->tw_time = 0;
1665 
1666 /* XXX
1667  * If this code will
1668  * be used for fin-wait-2 state also, then we may need
1669  * a ts_recent from the last segment.
1670  */
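	/* Classical TIME_WAIT duration: twice the maximum segment lifetime. */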
1671 	tw_time = 2 * tcp_msl;
1672 	acknow = tp->t_flags & TF_ACKNOW;
1673 	tcp_discardcb(tp);
1674 	so = inp->inp_socket;
1675 	ACCEPT_LOCK();
1676 	SOCK_LOCK(so);
1677 	so->so_pcb = NULL;
1678 	tw->tw_cred = crhold(so->so_cred);
1679 	tw->tw_so_options = so->so_options;
1680 	sotryfree(so);
1681 	inp->inp_socket = NULL;
1682 	if (acknow)
1683 		tcp_twrespond(tw, TH_ACK);
1684 	inp->inp_ppcb = (caddr_t)tw;
1685 	inp->inp_vflag |= INP_TIMEWAIT;
1686 	tcp_timer_2msl_reset(tw, tw_time);
1687 	INP_UNLOCK(inp);
1688 }
1689 
1690 /*
1691  * The approximate rate of ISN increase of Microsoft TCP stacks;
1692  * the actual rate is slightly higher due to the addition of
1693  * random positive increments.
1694  *
1695  * Most other new OSes use semi-randomized ISN values, so we
1696  * do not need to worry about them.
1697  */
1698 #define MS_ISN_BYTES_PER_SECOND		250000
1699 
1700 /*
1701  * Determine if the ISN we will generate has advanced beyond the last
1702  * sequence number used by the previous connection.  If so, indicate
1703  * that it is safe to recycle this tw socket by returning 1.
1704  *
1705  * XXXRW: This function should assert the inpcb lock as it does multiple
1706  * non-atomic reads from the tcptw, but is currently called without it from
1707  * in_pcb.c:in_pcblookup_local().
1708  */
1709 int
1710 tcp_twrecycleable(struct tcptw *tw)
1711 {
1712 	tcp_seq new_iss = tw->iss;
1713 	tcp_seq new_irs = tw->irs;
1714 
1715 	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
1716 	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);
1717 
1718 	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
1719 		return (1);
1720 	else
1721 		return (0);
1722 }
1723 
1724 struct tcptw *
1725 tcp_twclose(struct tcptw *tw, int reuse)
1726 {
1727 	struct inpcb *inp;
1728 
1729 	inp = tw->tw_inpcb;
1730 	INP_INFO_WLOCK_ASSERT(&tcbinfo);	/* tcp_timer_2msl_stop(). */
1731 	INP_LOCK_ASSERT(inp);
1732 
1733 	tw->tw_inpcb = NULL;
1734 	tcp_timer_2msl_stop(tw);
1735 	inp->inp_ppcb = NULL;
1736 #ifdef INET6
1737 	if (inp->inp_vflag & INP_IPV6PROTO)
1738 		in6_pcbdetach(inp);
1739 	else
1740 #endif
1741 		in_pcbdetach(inp);
1742 	tcpstat.tcps_closed++;
1743 	crfree(tw->tw_cred);
1744 	tw->tw_cred = NULL;
1745 	if (reuse)
1746 		return (tw);
1747 	uma_zfree(tcptw_zone, tw);
1748 	return (NULL);
1749 }
1750 
1751 int
1752 tcp_twrespond(struct tcptw *tw, int flags)
1753 {
1754 	struct inpcb *inp = tw->tw_inpcb;
1755 	struct tcphdr *th;
1756 	struct mbuf *m;
1757 	struct ip *ip = NULL;
1758 	u_int8_t *optp;
1759 	u_int hdrlen, optlen;
1760 	int error;
1761 #ifdef INET6
1762 	struct ip6_hdr *ip6 = NULL;
1763 	int isipv6 = inp->inp_inc.inc_isipv6;
1764 #endif
1765 
1766 	INP_LOCK_ASSERT(inp);
1767 
1768 	m = m_gethdr(M_DONTWAIT, MT_HEADER);
1769 	if (m == NULL)
1770 		return (ENOBUFS);
1771 	m->m_data += max_linkhdr;
1772 
1773 #ifdef MAC
1774 	mac_create_mbuf_from_inpcb(inp, m);
1775 #endif
1776 
1777 #ifdef INET6
1778 	if (isipv6) {
1779 		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
1780 		ip6 = mtod(m, struct ip6_hdr *);
1781 		th = (struct tcphdr *)(ip6 + 1);
1782 		tcpip_fillheaders(inp, ip6, th);
1783 	} else
1784 #endif
1785 	{
1786 		hdrlen = sizeof(struct tcpiphdr);
1787 		ip = mtod(m, struct ip *);
1788 		th = (struct tcphdr *)(ip + 1);
1789 		tcpip_fillheaders(inp, ip, th);
1790 	}
1791 	optp = (u_int8_t *)(th + 1);
1792 
1793 	/*
1794 	 * Send a timestamp and echo-reply if both our side and our peer
1795 	 * have sent timestamps in our SYN's and this is not a RST.
1796 	 */
1797 	if (tw->t_recent && flags == TH_ACK) {
1798 		u_int32_t *lp = (u_int32_t *)optp;
1799 
1800 		/* Form timestamp option as shown in appendix A of RFC 1323. */
1801 		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
1802 		*lp++ = htonl(ticks);
1803 		*lp   = htonl(tw->t_recent);
1804 		optp += TCPOLEN_TSTAMP_APPA;
1805 	}
1806 
1807 	optlen = optp - (u_int8_t *)(th + 1);
1808 
1809 	m->m_len = hdrlen + optlen;
1810 	m->m_pkthdr.len = m->m_len;
1811 
1812 	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));
1813 
1814 	th->th_seq = htonl(tw->snd_nxt);
1815 	th->th_ack = htonl(tw->rcv_nxt);
1816 	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1817 	th->th_flags = flags;
1818 	th->th_win = htons(tw->last_win);
1819 
1820 #ifdef INET6
1821 	if (isipv6) {
1822 		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
1823 		    sizeof(struct tcphdr) + optlen);
1824 		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
1825 		error = ip6_output(m, inp->in6p_outputopts, NULL,
1826 		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
1827 	} else
1828 #endif
1829 	{
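		/*
		 * For IPv4, store only the pseudo-header sum in th_sum and
		 * set CSUM_TCP so the remainder of the TCP checksum is
		 * filled in later, either by the hardware or by the stack's
		 * delayed checksum code on the output path.
		 */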
1830 		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1831 		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
1832 		m->m_pkthdr.csum_flags = CSUM_TCP;
1833 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1834 		ip->ip_len = m->m_pkthdr.len;
1835 		if (path_mtu_discovery)
1836 			ip->ip_off |= IP_DF;
1837 		error = ip_output(m, inp->inp_options, NULL,
1838 		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
1839 		    NULL, inp);
1840 	}
1841 	if (flags & TH_ACK)
1842 		tcpstat.tcps_sndacks++;
1843 	else
1844 		tcpstat.tcps_sndctrl++;
1845 	tcpstat.tcps_sndtotal++;
1846 	return (error);
1847 }
1848 
1849 /*
1850  * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
1851  *
1852  * This code attempts to calculate the bandwidth-delay product as a
1853  * means of determining the optimal window size to maximize bandwidth,
1854  * minimize RTT, and avoid the over-allocation of buffers on interfaces and
1855  * routers.  This code also does a fairly good job keeping RTTs in check
1856  * across slow links like modems.  We implement an algorithm which is very
1857  * similar to (but not meant to be) TCP/Vegas.  The code operates on the
1858  * transmitter side of a TCP connection and so only affects the transmit
1859  * side of the connection.
1860  *
1861  * BACKGROUND:  TCP makes no provision for the management of buffer space
1862  * at the end points or at the intermediate routers and switches.  A TCP
1863  * stream, whether using NewReno or not, will eventually buffer as
1864  * many packets as it is able, and the only reason this typically works is
1865  * the fairly small default buffers made available for a connection
1866  * (typically 16K or 32K).  As machines use larger windows and/or window
1867  * scaling it is now fairly easy for even a single TCP connection to blow-out
1868  * all available buffer space not only on the local interface, but on
1869  * intermediate routers and switches as well.  NewReno makes a misguided
1870  * attempt to 'solve' this problem by waiting for an actual failure to occur,
1871  * then backing off, then steadily increasing the window again until another
1872  * failure occurs, ad-infinitum.  This results in terrible oscillation that
1873  * is only made worse as network loads increase and the idea of intentionally
1874  * blowing out network buffers is, frankly, a terrible way to manage network
1875  * resources.
1876  *
1877  * It is far better to limit the transmit window prior to the failure
1878  * condition being achieved.  There are two general ways to do this:  First
1879  * you can 'scan' through different transmit window sizes and locate the
1880  * point where the RTT stops increasing, indicating that you have filled the
1881  * pipe, then scan backwards until you note that RTT stops decreasing, then
1882  * repeat ad-infinitum.  This method works in principle but has severe
1883  * implementation issues due to RTT variances, timer granularity, and
1884  * instability in the algorithm which can lead to many false positives and
1885  * create oscillations as well as interact badly with other TCP streams
1886  * implementing the same algorithm.
1887  *
1888  * The second method is to limit the window to the bandwidth delay product
1889  * of the link.  This is the method we implement.  RTT variances and our
1890  * own manipulation of the congestion window, bwnd, can potentially
1891  * destabilize the algorithm.  For this reason we have to stabilize the
1892  * elements used to calculate the window.  We do this by using the minimum
1893  * observed RTT, the long term average of the observed bandwidth, and
1894  * by adding two segments worth of slop.  It isn't perfect but it is able
1895  * to react to changing conditions and gives us a very stable basis on
1896  * which to extend the algorithm.
1897  */
1898 void
1899 tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
1900 {
1901 	u_long bw;
1902 	u_long bwnd;
1903 	int save_ticks;
1904 
1905 	INP_LOCK_ASSERT(tp->t_inpcb);
1906 
1907 	/*
1908 	 * If inflight_enable is disabled in the middle of a tcp connection,
1909 	 * make sure snd_bwnd is effectively disabled.
1910 	 */
1911 	if (tcp_inflight_enable == 0) {
1912 		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
1913 		tp->snd_bandwidth = 0;
1914 		return;
1915 	}
1916 
1917 	/*
1918 	 * Figure out the bandwidth.  Due to the tick granularity this
1919 	 * is a very rough number and it MUST be averaged over a fairly
1920 	 * long period of time.  XXX we need to take into account a link
1921 	 * that is not using all available bandwidth, but for now our
1922 	 * slop will ramp us up if this case occurs and the bandwidth later
1923 	 * increases.
1924 	 *
1925 	 * Note: if ticks rolls over, 'bw' may wind up negative.  We must
1926 	 * effectively reset t_bw_rtttime for this case.
1927 	 */
1928 	save_ticks = ticks;
1929 	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
1930 		return;
1931 
1932 	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
1933 	    (save_ticks - tp->t_bw_rtttime);
1934 	tp->t_bw_rtttime = save_ticks;
1935 	tp->t_bw_rtseq = ack_seq;
1936 	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
1937 		return;
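	/*
	 * Blend the new sample into the long-term average: the smoothed
	 * bandwidth is weighted 15/16 old value, 1/16 new sample, an
	 * exponential moving average that damps the tick-granularity noise
	 * mentioned above.
	 */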
1938 	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
1939 
1940 	tp->snd_bandwidth = bw;
1941 
1942 	/*
1943 	 * Calculate the semi-static bandwidth delay product, plus two maximal
1944 	 * segments.  The additional slop puts us squarely in the sweet
1945 	 * spot and also handles the bandwidth run-up case and stabilization.
1946 	 * Without the slop we could be locking ourselves into a lower
1947 	 * bandwidth.
1948 	 *
1949 	 * Situations Handled:
1950 	 *	(1) Prevents over-queueing of packets on LANs, especially on
1951 	 *	    high speed LANs, allowing larger TCP buffers to be
1952 	 *	    specified, and also does a good job preventing
1953 	 *	    over-queueing of packets over choke points like modems
1954 	 *	    (at least for the transmit side).
1955 	 *
1956 	 *	(2) Is able to handle changing network loads (bandwidth
1957 	 *	    drops so bwnd drops, bandwidth increases so bwnd
1958 	 *	    increases).
1959 	 *
1960 	 *	(3) Theoretically should stabilize in the face of multiple
1961 	 *	    connections implementing the same algorithm (this may need
1962 	 *	    a little work).
1963 	 *
1964 	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
1965 	 *	    be adjusted with a sysctl but typically only needs to be
1966 	 *	    be adjusted with a sysctl, but typically only needs to be
1967 	 *	    changed on very slow connections.  A value no smaller than 5
1968 	 *	    no other choice.
1969 	 */
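	/*
	 * USERTT averages the current smoothed RTT with the best value
	 * observed; dividing by (hz << TCP_RTT_SHIFT) converts that
	 * fixed-point tick quantity to seconds, so bwnd is simply
	 * bandwidth * RTT plus the stability slop.  As a purely
	 * illustrative example: 1,000,000 bytes/sec at a 50 ms RTT gives a
	 * 50,000 byte product, and with tcp_inflight_stab at its default
	 * of 20 and a 1460 byte maxseg the slop adds 2,920 bytes, for a
	 * bwnd of about 52,920 bytes.
	 */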
1970 #define USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
1971 	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + tcp_inflight_stab * tp->t_maxseg / 10;
1972 #undef USERTT
1973 
1974 	if (tcp_inflight_debug > 0) {
1975 		static int ltime;
1976 		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
1977 			ltime = ticks;
1978 			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
1979 			    tp,
1980 			    bw,
1981 			    tp->t_rttbest,
1982 			    tp->t_srtt,
1983 			    bwnd
1984 			);
1985 		}
1986 	}
1987 	if ((long)bwnd < tcp_inflight_min)
1988 		bwnd = tcp_inflight_min;
1989 	if (bwnd > tcp_inflight_max)
1990 		bwnd = tcp_inflight_max;
1991 	if ((long)bwnd < tp->t_maxseg * 2)
1992 		bwnd = tp->t_maxseg * 2;
1993 	tp->snd_bwnd = bwnd;
1994 }
1995 
1996 #ifdef TCP_SIGNATURE
1997 /*
1998  * Callback function invoked by m_apply() to digest TCP segment data
1999  * contained within an mbuf chain.
2000  */
2001 static int
2002 tcp_signature_apply(void *fstate, void *data, u_int len)
2003 {
2004 
2005 	MD5Update(fstate, (u_char *)data, len);
2006 	return (0);
2007 }
2008 
2009 /*
2010  * Compute TCP-MD5 hash of a TCPv4 segment. (RFC2385)
2011  *
2012  * Parameters:
2013  * m		pointer to head of mbuf chain
2014  * off0		offset to TCP header within the mbuf chain
2015  * len		length of TCP segment data, excluding options
2016  * optlen	length of TCP segment options
2017  * buf		pointer to storage for computed MD5 digest
2018  * direction	direction of flow (IPSEC_DIR_INBOUND or IPSEC_DIR_OUTBOUND)
2019  *
2020  * We do this over ip, tcphdr, segment data, and the key in the SADB.
2021  * When called from tcp_input(), we can be sure that th_sum has been
2022  * zeroed out and verified already.
2023  *
2024  * This function is for IPv4 use only. Calling this function with an
2025  * IPv6 packet in the mbuf chain will yield undefined results.
2026  *
2027  * Return 0 if successful, otherwise return EINVAL.
2028  *
2029  * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
2030  * search with the destination IP address, and a 'magic SPI' to be
2031  * determined by the application. This is hardcoded elsewhere to 1179
2032  * right now. Another branch of this code exists which uses the SPD to
2033  * specify per-application flows but it is unstable.
2034  */
2035 int
2036 tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
2037     u_char *buf, u_int direction)
2038 {
2039 	union sockaddr_union dst;
2040 	struct ippseudo ippseudo;
2041 	MD5_CTX ctx;
2042 	int doff;
2043 	struct ip *ip;
2044 	struct ipovly *ipovly;
2045 	struct secasvar *sav;
2046 	struct tcphdr *th;
2047 	u_short savecsum;
2048 
2049 	KASSERT(m != NULL, ("NULL mbuf chain"));
2050 	KASSERT(buf != NULL, ("NULL signature pointer"));
2051 
2052 	/* Extract the destination from the IP header in the mbuf. */
2053 	ip = mtod(m, struct ip *);
2054 	bzero(&dst, sizeof(union sockaddr_union));
2055 	dst.sa.sa_len = sizeof(struct sockaddr_in);
2056 	dst.sa.sa_family = AF_INET;
2057 	dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
2058 	    ip->ip_src : ip->ip_dst;
2059 
2060 	/* Look up an SADB entry which matches the address of the peer. */
2061 	sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
2062 	if (sav == NULL) {
2063 		printf("%s: SADB lookup failed for %s\n", __func__,
2064 		    inet_ntoa(dst.sin.sin_addr));
2065 		return (EINVAL);
2066 	}
2067 
2068 	MD5Init(&ctx);
2069 	ipovly = (struct ipovly *)ip;
2070 	th = (struct tcphdr *)((u_char *)ip + off0);
2071 	doff = off0 + sizeof(struct tcphdr) + optlen;
2072 
2073 	/*
2074 	 * Step 1: Update MD5 hash with IP pseudo-header.
2075 	 *
2076 	 * XXX The ippseudo header MUST be digested in network byte order,
2077 	 * or else we'll fail the regression test. Assume all fields we've
2078 	 * been doing arithmetic on have been in host byte order.
2079 	 * XXX One cannot depend on ipovly->ih_len here. When called from
2080 	 * tcp_output(), the underlying ip_len member has not yet been set.
2081 	 */
2082 	ippseudo.ippseudo_src = ipovly->ih_src;
2083 	ippseudo.ippseudo_dst = ipovly->ih_dst;
2084 	ippseudo.ippseudo_pad = 0;
2085 	ippseudo.ippseudo_p = IPPROTO_TCP;
2086 	ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
2087 	MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
2088 
2089 	/*
2090 	 * Step 2: Update MD5 hash with TCP header, excluding options.
2091 	 * The TCP checksum must be set to zero.
2092 	 */
2093 	savecsum = th->th_sum;
2094 	th->th_sum = 0;
2095 	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
2096 	th->th_sum = savecsum;
2097 
2098 	/*
2099 	 * Step 3: Update MD5 hash with TCP segment data.
2100 	 *         Use m_apply() to avoid an early m_pullup().
2101 	 */
2102 	if (len > 0)
2103 		m_apply(m, doff, len, tcp_signature_apply, &ctx);
2104 
2105 	/*
2106 	 * Step 4: Update MD5 hash with shared secret.
2107 	 */
2108 	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
2109 	MD5Final(buf, &ctx);
2110 
2111 	key_sa_recordxfer(sav, m);
2112 	KEY_FREESAV(&sav);
2113 	return (0);
2114 }
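/*
 * Illustrative use only (not part of this file): a caller on the input
 * path that has located a TCPOPT_SIGNATURE option could verify a segment
 * roughly as follows, where 'sigp', 'tlen', and the 'dropit' label are
 * assumed names for the received 16-byte digest, the segment data length,
 * and the caller's error path:
 *
 *	u_char digest[TCP_SIGLEN];
 *
 *	if (tcp_signature_compute(m, off0, tlen, optlen, digest,
 *	    IPSEC_DIR_INBOUND) != 0 ||
 *	    bcmp(sigp, digest, TCP_SIGLEN) != 0)
 *		goto dropit;		(segment fails the RFC 2385 check)
 */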
2115 #endif /* TCP_SIGNATURE */
2116