xref: /freebsd/sys/netinet/tcp_timewait.c (revision 6e8394b8baa7d5d9153ab90de6824bcd19b3b4e1)
1 /*
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
34  *	$Id: tcp_subr.c,v 1.54 1999/05/03 23:57:31 billf Exp $
35  */
36 
37 #include "opt_compat.h"
38 #include "opt_tcpdebug.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/sysctl.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/protosw.h>
49 
50 #include <vm/vm_zone.h>
51 
52 #include <net/route.h>
53 #include <net/if.h>
54 
55 #define _IP_VHL
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/in_pcb.h>
60 #include <netinet/in_var.h>
61 #include <netinet/ip_var.h>
62 #include <netinet/tcp.h>
63 #include <netinet/tcp_fsm.h>
64 #include <netinet/tcp_seq.h>
65 #include <netinet/tcp_timer.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/tcpip.h>
68 #ifdef TCPDEBUG
69 #include <netinet/tcp_debug.h>
70 #endif
71 
72 int 	tcp_mssdflt = TCP_MSS;
73 SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
74     &tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
75 
76 static int 	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
77 SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
78     &tcp_rttdflt , 0, "Default TCP Round Trip Time");
79 
80 static int	tcp_do_rfc1323 = 1;
81 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
82     &tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
83 
84 static int	tcp_do_rfc1644 = 0;
85 SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
86     &tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");
87 
88 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
89     &tcbinfo.ipi_count, 0, "Number of active PCBs");
90 
91 static void	tcp_cleartaocache __P((void));
92 static void	tcp_notify __P((struct inpcb *, int));
93 
94 /*
95  * Target size of TCP PCB hash tables. Must be a power of two.
96  *
97  * Note that this can be overridden by the kernel environment
98  * variable net.inet.tcp.tcbhashsize
99  */
100 #ifndef TCBHASHSIZE
101 #define TCBHASHSIZE	512
102 #endif
103 
104 /*
105  * This is the actual shape of what we allocate using the zone
106  * allocator.  Doing it this way allows us to protect both structures
107  * using the same generation count, and also eliminates the overhead
108  * of allocating tcpcbs separately.  By hiding the structure here,
109  * we avoid changing most of the rest of the code (although it needs
110  * to be changed, eventually, for greater efficiency).
111  */
112 #define	ALIGNMENT	32
113 #define	ALIGNM1		(ALIGNMENT - 1)
114 struct	inp_tp {
115 	union {
116 		struct	inpcb inp;
117 		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
118 	} inp_tp_u;
119 	struct	tcpcb tcb;
120 };
121 #undef ALIGNMENT
122 #undef ALIGNM1
123 
124 /*
125  * Tcp initialization
126  */
127 void
128 tcp_init()
129 {
130 	int hashsize;
131 
132 	tcp_iss = random();	/* wrong, but better than a constant */
133 	tcp_ccgen = 1;
134 	tcp_cleartaocache();
135 	LIST_INIT(&tcb);
136 	tcbinfo.listhead = &tcb;
137 	if (!(getenv_int("net.inet.tcp.tcbhashsize", &hashsize)))
138 		hashsize = TCBHASHSIZE;
139 	if (!powerof2(hashsize)) {
140 		printf("WARNING: TCB hash size not a power of 2\n");
141 		hashsize = 512; /* safe default */
142 	}
143 	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
144 	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
145 					&tcbinfo.porthashmask);
146 	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
147 				 ZONE_INTERRUPT, 0);
148 	if (max_protohdr < sizeof(struct tcpiphdr))
149 		max_protohdr = sizeof(struct tcpiphdr);
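	/*
	 * tcp_respond() and tcp_output() build the link-level and
	 * TCP/IP headers in a single mbuf header, so the two together
	 * must fit within MHLEN.
	 */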
150 	if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
151 		panic("tcp_init");
152 }
153 
154 /*
155  * Create template to be used to send tcp packets on a connection.
156  * Called after the host entry is created; allocates an mbuf and fills
157  * in a skeletal tcp/ip header, minimizing the amount of work
158  * necessary when the connection is used.
159  */
160 struct tcpiphdr *
161 tcp_template(tp)
162 	struct tcpcb *tp;
163 {
164 	register struct inpcb *inp = tp->t_inpcb;
165 	register struct mbuf *m;
166 	register struct tcpiphdr *n;
167 
168 	if ((n = tp->t_template) == 0) {
169 		m = m_get(M_DONTWAIT, MT_HEADER);
170 		if (m == NULL)
171 			return (0);
172 		m->m_len = sizeof (struct tcpiphdr);
173 		n = mtod(m, struct tcpiphdr *);
174 	}
175 	bzero(n->ti_x1, sizeof(n->ti_x1));
176 	n->ti_pr = IPPROTO_TCP;
177 	n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
178 	n->ti_src = inp->inp_laddr;
179 	n->ti_dst = inp->inp_faddr;
180 	n->ti_sport = inp->inp_lport;
181 	n->ti_dport = inp->inp_fport;
182 	n->ti_seq = 0;
183 	n->ti_ack = 0;
184 	n->ti_x2 = 0;
185 	n->ti_off = 5;
186 	n->ti_flags = 0;
187 	n->ti_win = 0;
188 	n->ti_sum = 0;
189 	n->ti_urp = 0;
190 	return (n);
191 }
192 
193 /*
194  * Send a single message to the TCP at address specified by
195  * the given TCP/IP header.  If m == 0, then we make a copy
196  * of the tcpiphdr at ti and send directly to the addressed host.
197  * This is used to force keep alive messages out using the TCP
198  * template for a connection tp->t_template.  If flags are given
199  * then we send a message back to the TCP which originated the
200  * segment ti, and discard the mbuf containing it and any other
201  * attached mbufs.
202  *
203  * In any case the ack and sequence number of the transmitted
204  * segment are as specified by the parameters.
205  *
206  * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
207  */
208 void
209 tcp_respond(tp, ti, m, ack, seq, flags)
210 	struct tcpcb *tp;
211 	register struct tcpiphdr *ti;
212 	register struct mbuf *m;
213 	tcp_seq ack, seq;
214 	int flags;
215 {
216 	register int tlen;
217 	int win = 0;
218 	struct route *ro = 0;
219 	struct route sro;
220 
221 	if (tp) {
222 		if (!(flags & TH_RST))
223 			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
224 		ro = &tp->t_inpcb->inp_route;
225 	} else {
226 		ro = &sro;
227 		bzero(ro, sizeof *ro);
228 	}
229 	if (m == 0) {
230 		m = m_gethdr(M_DONTWAIT, MT_HEADER);
231 		if (m == NULL)
232 			return;
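		/*
		 * For compatibility with 4.2BSD-derived stacks, keepalive
		 * probes carry one garbage byte of data; otherwise the
		 * probe segment is empty.
		 */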
233 #ifdef TCP_COMPAT_42
234 		tlen = 1;
235 #else
236 		tlen = 0;
237 #endif
238 		m->m_data += max_linkhdr;
239 		*mtod(m, struct tcpiphdr *) = *ti;
240 		ti = mtod(m, struct tcpiphdr *);
241 		flags = TH_ACK;
242 	} else {
243 		m_freem(m->m_next);
244 		m->m_next = 0;
245 		m->m_data = (caddr_t)ti;
246 		m->m_len = sizeof (struct tcpiphdr);
247 		tlen = 0;
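		/*
		 * Reply using the received segment's header in place:
		 * swap the source and destination addresses and ports so
		 * the response goes back to the originator.
		 */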
248 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
249 		xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, n_long);
250 		xchg(ti->ti_dport, ti->ti_sport, n_short);
251 #undef xchg
252 	}
253 	ti->ti_len = htons((u_short)(sizeof (struct tcphdr) + tlen));
254 	tlen += sizeof (struct tcpiphdr);
255 	m->m_len = tlen;
256 	m->m_pkthdr.len = tlen;
257 	m->m_pkthdr.rcvif = (struct ifnet *) 0;
258 	bzero(ti->ti_x1, sizeof(ti->ti_x1));
259 	ti->ti_seq = htonl(seq);
260 	ti->ti_ack = htonl(ack);
261 	ti->ti_x2 = 0;
262 	ti->ti_off = sizeof (struct tcphdr) >> 2;
263 	ti->ti_flags = flags;
264 	if (tp)
265 		ti->ti_win = htons((u_short) (win >> tp->rcv_scale));
266 	else
267 		ti->ti_win = htons((u_short)win);
268 	ti->ti_urp = 0;
269 	ti->ti_sum = 0;
270 	ti->ti_sum = in_cksum(m, tlen);
271 	((struct ip *)ti)->ip_len = tlen;
272 	((struct ip *)ti)->ip_ttl = ip_defttl;
273 #ifdef TCPDEBUG
274 	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
275 		tcp_trace(TA_OUTPUT, 0, tp, ti, 0);
276 #endif
277 	(void) ip_output(m, NULL, ro, 0, NULL);
278 	if (ro == &sro && ro->ro_rt) {
279 		RTFREE(ro->ro_rt);
280 	}
281 }
282 
283 /*
284  * Create a new TCP control block, making an
285  * empty reassembly queue and hooking it to the argument
286  * protocol control block.  The `inp' parameter must have
287  * come from the zone allocator set up in tcp_init().
288  */
289 struct tcpcb *
290 tcp_newtcpcb(inp)
291 	struct inpcb *inp;
292 {
293 	struct inp_tp *it;
294 	register struct tcpcb *tp;
295 
296 	it = (struct inp_tp *)inp;
297 	tp = &it->tcb;
298 	bzero((char *) tp, sizeof(struct tcpcb));
299 	tp->t_segq = NULL;
300 	tp->t_maxseg = tp->t_maxopd = tcp_mssdflt;
301 
302 	if (tcp_do_rfc1323)
303 		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
304 	if (tcp_do_rfc1644)
305 		tp->t_flags |= TF_REQ_CC;
306 	tp->t_inpcb = inp;	/* XXX */
307 	/*
308 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
309 	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
310 	 * reasonable initial retransmit time.
311 	 */
312 	tp->t_srtt = TCPTV_SRTTBASE;
313 	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
314 	tp->t_rttmin = TCPTV_MIN;
315 	tp->t_rxtcur = TCPTV_RTOBASE;
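	/*
	 * Leave the congestion window and slow-start threshold
	 * effectively unlimited for now; they are scaled down once the
	 * connection's MSS and route metrics are known (see tcp_mss()
	 * in tcp_input.c).
	 */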
316 	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
317 	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
318 	inp->inp_ip_ttl = ip_defttl;
319 	inp->inp_ppcb = (caddr_t)tp;
320 	return (tp);		/* XXX */
321 }
322 
323 /*
324  * Drop a TCP connection, reporting
325  * the specified error.  If connection is synchronized,
326  * then send a RST to peer.
327  */
328 struct tcpcb *
329 tcp_drop(tp, errno)
330 	register struct tcpcb *tp;
331 	int errno;
332 {
333 	struct socket *so = tp->t_inpcb->inp_socket;
334 
335 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
336 		tp->t_state = TCPS_CLOSED;
337 		(void) tcp_output(tp);
338 		tcpstat.tcps_drops++;
339 	} else
340 		tcpstat.tcps_conndrops++;
341 	if (errno == ETIMEDOUT && tp->t_softerror)
342 		errno = tp->t_softerror;
343 	so->so_error = errno;
344 	return (tcp_close(tp));
345 }
346 
347 /*
348  * Close a TCP control block:
349  *	discard all space held by the tcp
350  *	discard internet protocol block
351  *	wake up any sleepers
352  */
353 struct tcpcb *
354 tcp_close(tp)
355 	register struct tcpcb *tp;
356 {
357 	register struct mbuf *q;
358 	register struct mbuf *nq;
359 	struct inpcb *inp = tp->t_inpcb;
360 	struct socket *so = inp->inp_socket;
361 	register struct rtentry *rt;
362 	int dosavessthresh;
363 
364 	/*
365 	 * If we got enough samples through the srtt filter,
366 	 * save the rtt and rttvar in the routing entry.
367 	 * 'Enough' is arbitrarily defined as 16 samples.
368 	 * 16 samples is enough for the srtt filter to converge
369 	 * to within 5% of the correct value; fewer samples and
370 	 * we could save a very bogus rtt.
371 	 *
372 	 * Don't update the default route's characteristics and don't
373 	 * update anything that the user "locked".
374 	 */
375 	if (tp->t_rttupdated >= 16 &&
376 	    (rt = inp->inp_route.ro_rt) &&
377 	    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
378 		register u_long i = 0;
379 
380 		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
381 			i = tp->t_srtt *
382 			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
383 			if (rt->rt_rmx.rmx_rtt && i)
384 				/*
385 				 * filter this update to half the old & half
386 				 * the new values, converting scale.
387 				 * See route.h and tcp_var.h for a
388 				 * description of the scaling constants.
389 				 */
390 				rt->rt_rmx.rmx_rtt =
391 				    (rt->rt_rmx.rmx_rtt + i) / 2;
392 			else
393 				rt->rt_rmx.rmx_rtt = i;
394 			tcpstat.tcps_cachedrtt++;
395 		}
396 		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
397 			i = tp->t_rttvar *
398 			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
399 			if (rt->rt_rmx.rmx_rttvar && i)
400 				rt->rt_rmx.rmx_rttvar =
401 				    (rt->rt_rmx.rmx_rttvar + i) / 2;
402 			else
403 				rt->rt_rmx.rmx_rttvar = i;
404 			tcpstat.tcps_cachedrttvar++;
405 		}
406 		/*
407 		 * The old comment here said:
408 		 * update the pipelimit (ssthresh) if it has been updated
409 		 * already or if a pipesize was specified & the threshold
410 		 * got below half the pipesize.  I.e., wait for bad news
411 		 * before we start updating, then update on both good
412 		 * and bad news.
413 		 *
414 		 * But we want to save the ssthresh even if no pipesize is
415 		 * specified explicitly in the route, because such
416 		 * connections still have an implicit pipesize specified
417 		 * by the global tcp_sendspace.  In the absence of a reliable
418 		 * way to calculate the pipesize, it will have to do.
419 		 */
420 		i = tp->snd_ssthresh;
421 		if (rt->rt_rmx.rmx_sendpipe != 0)
422 			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
423 		else
424 			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
425 		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
426 		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
427 		    || dosavessthresh) {
428 			/*
429 			 * convert the limit from user data bytes to
430 			 * packets then to packet data bytes.
431 			 */
432 			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
433 			if (i < 2)
434 				i = 2;
435 			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
436 			if (rt->rt_rmx.rmx_ssthresh)
437 				rt->rt_rmx.rmx_ssthresh =
438 				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
439 			else
440 				rt->rt_rmx.rmx_ssthresh = i;
441 			tcpstat.tcps_cachedssthresh++;
442 		}
443 	}
444 	/* free the reassembly queue, if any */
445 	for (q = tp->t_segq; q; q = nq) {
446 		nq = q->m_nextpkt;
447 		tp->t_segq = nq;
448 		m_freem(q);
449 	}
450 	if (tp->t_template)
451 		(void) m_free(dtom(tp->t_template));
452 	inp->inp_ppcb = NULL;
453 	soisdisconnected(so);
454 	in_pcbdetach(inp);
455 	tcpstat.tcps_closed++;
456 	return ((struct tcpcb *)0);
457 }
458 
459 void
460 tcp_drain()
461 {
462 
463 }
464 
465 /*
466  * Notify a tcp user of an asynchronous error;
467  * store error as soft error, but wake up user
468  * (for now, won't do anything until can select for soft error).
469  */
470 static void
471 tcp_notify(inp, error)
472 	struct inpcb *inp;
473 	int error;
474 {
475 	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
476 	register struct socket *so = inp->inp_socket;
477 
478 	/*
479 	 * Ignore some errors if we are hooked up.
480 	 * If connection hasn't completed, has retransmitted several times,
481 	 * and receives a second error, give up now.  This is better
482 	 * than waiting a long time to establish a connection that
483 	 * can never complete.
484 	 */
485 	if (tp->t_state == TCPS_ESTABLISHED &&
486 	     (error == EHOSTUNREACH || error == ENETUNREACH ||
487 	      error == EHOSTDOWN)) {
488 		return;
489 	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
490 	    tp->t_softerror)
491 		so->so_error = error;
492 	else
493 		tp->t_softerror = error;
494 	wakeup((caddr_t) &so->so_timeo);
495 	sorwakeup(so);
496 	sowwakeup(so);
497 }
498 
499 static int
500 tcp_pcblist SYSCTL_HANDLER_ARGS
501 {
502 	int error, i, n, s;
503 	struct inpcb *inp, **inp_list;
504 	inp_gen_t gencnt;
505 	struct xinpgen xig;
506 
507 	/*
508 	 * The process of preparing the TCB list is too time-consuming and
509 	 * resource-intensive to do twice on every request.
510 	 */
511 	if (req->oldptr == 0) {
512 		n = tcbinfo.ipi_count;
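		/*
		 * Size-only probe: report an estimate padded by n/8 so
		 * the caller's buffer still suffices if connections are
		 * created before the real request arrives.
		 */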
513 		req->oldidx = 2 * (sizeof xig)
514 			+ (n + n/8) * sizeof(struct xtcpcb);
515 		return 0;
516 	}
517 
518 	if (req->newptr != 0)
519 		return EPERM;
520 
521 	/*
522 	 * OK, now we're committed to doing something.
523 	 */
524 	s = splnet();
525 	gencnt = tcbinfo.ipi_gencnt;
526 	n = tcbinfo.ipi_count;
527 	splx(s);
528 
529 	xig.xig_len = sizeof xig;
530 	xig.xig_count = n;
531 	xig.xig_gen = gencnt;
532 	xig.xig_sogen = so_gencnt;
533 	error = SYSCTL_OUT(req, &xig, sizeof xig);
534 	if (error)
535 		return error;
536 
537 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
538 	if (inp_list == 0)
539 		return ENOMEM;
540 
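	/*
	 * Gather the inpcb pointers at splnet; the SYSCTL_OUT copies
	 * below may sleep, so they are done after splx().
	 */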
541 	s = splnet();
542 	for (inp = tcbinfo.listhead->lh_first, i = 0; inp && i < n;
543 	     inp = inp->inp_list.le_next) {
544 		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
545 			inp_list[i++] = inp;
546 	}
547 	splx(s);
548 	n = i;
549 
550 	error = 0;
551 	for (i = 0; i < n; i++) {
552 		inp = inp_list[i];
553 		if (inp->inp_gencnt <= gencnt) {
554 			struct xtcpcb xt;
555 			caddr_t inp_ppcb;
556 			xt.xt_len = sizeof xt;
557 			/* XXX should avoid extra copy */
558 			bcopy(inp, &xt.xt_inp, sizeof *inp);
559 			inp_ppcb = inp->inp_ppcb;
560 			if (inp_ppcb != NULL)
561 				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
562 			else
563 				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
564 			if (inp->inp_socket)
565 				sotoxsocket(inp->inp_socket, &xt.xt_socket);
566 			error = SYSCTL_OUT(req, &xt, sizeof xt);
567 		}
568 	}
569 	if (!error) {
570 		/*
571 		 * Give the user an updated idea of our state.
572 		 * If the generation differs from what we told
573 		 * her before, she knows that something happened
574 		 * while we were processing this request, and it
575 		 * might be necessary to retry.
576 		 */
577 		s = splnet();
578 		xig.xig_gen = tcbinfo.ipi_gencnt;
579 		xig.xig_sogen = so_gencnt;
580 		xig.xig_count = tcbinfo.ipi_count;
581 		splx(s);
582 		error = SYSCTL_OUT(req, &xig, sizeof xig);
583 	}
584 	free(inp_list, M_TEMP);
585 	return error;
586 }
587 
588 SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
589 	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
590 
591 void
592 tcp_ctlinput(cmd, sa, vip)
593 	int cmd;
594 	struct sockaddr *sa;
595 	void *vip;
596 {
597 	register struct ip *ip = vip;
598 	register struct tcphdr *th;
599 	void (*notify) __P((struct inpcb *, int)) = tcp_notify;
600 
601 	if (cmd == PRC_QUENCH)
602 		notify = tcp_quench;
603 	else if (cmd == PRC_MSGSIZE)
604 		notify = tcp_mtudisc;
605 	else if (!PRC_IS_REDIRECT(cmd) &&
606 		 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
607 		return;
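	/*
	 * If the offending packet's IP header accompanied the message,
	 * notify only the matching connection; otherwise notify every
	 * TCP PCB associated with the address.
	 */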
608 	if (ip) {
609 		th = (struct tcphdr *)((caddr_t)ip
610 				       + (IP_VHL_HL(ip->ip_vhl) << 2));
611 		in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
612 			cmd, notify);
613 	} else
614 		in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
615 }
616 
617 /*
618  * When a source quench is received, close congestion window
619  * to one segment.  We will gradually open it again as we proceed.
620  */
621 void
622 tcp_quench(inp, errno)
623 	struct inpcb *inp;
624 	int errno;
625 {
626 	struct tcpcb *tp = intotcpcb(inp);
627 
628 	if (tp)
629 		tp->snd_cwnd = tp->t_maxseg;
630 }
631 
632 /*
633  * When `need fragmentation' ICMP is received, update our idea of the MSS
634  * based on the new value in the route.  Also nudge TCP to send something,
635  * since we know the packet we just sent was dropped.
636  * This duplicates some code in the tcp_mss() function in tcp_input.c.
637  */
638 void
639 tcp_mtudisc(inp, errno)
640 	struct inpcb *inp;
641 	int errno;
642 {
643 	struct tcpcb *tp = intotcpcb(inp);
644 	struct rtentry *rt;
645 	struct rmxp_tao *taop;
646 	struct socket *so = inp->inp_socket;
647 	int offered;
648 	int mss;
649 
650 	if (tp) {
651 		rt = tcp_rtlookup(inp);
652 		if (!rt || !rt->rt_rmx.rmx_mtu) {
653 			tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
654 			return;
655 		}
656 		taop = rmx_taop(rt->rt_rmx);
657 		offered = taop->tao_mssopt;
658 		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
659 		if (offered)
660 			mss = min(mss, offered);
661 		/*
662 		 * XXX - The above conditional probably violates the TCP
663 		 * spec.  The problem is that, since we don't know the
664 		 * other end's MSS, we are supposed to use a conservative
665 		 * default.  But, if we do that, then MTU discovery will
666 		 * never actually take place, because the conservative
667 		 * default is much less than the MTUs typically seen
668 		 * on the Internet today.  For the moment, we'll sweep
669 		 * this under the carpet.
670 		 *
671 		 * The conservative default might not actually be a problem
672 		 * if the only case this occurs is when sending an initial
673 		 * SYN with options and data to a host we've never talked
674 		 * to before.  Then, they will reply with an MSS value which
675 		 * will get recorded and the new parameters should get
676 		 * recomputed.  For Further Study.
677 		 */
678 		if (tp->t_maxopd <= mss)
679 			return;
680 		tp->t_maxopd = mss;
681 
682 		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
683 		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
684 			mss -= TCPOLEN_TSTAMP_APPA;
685 		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
686 		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
687 			mss -= TCPOLEN_CC_APPA;
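		/*
		 * Round the MSS down to a multiple of the mbuf cluster
		 * size so full-sized segments fill clusters exactly; a
		 * mask suffices when MCLBYTES is a power of two.
		 */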
688 #if	(MCLBYTES & (MCLBYTES - 1)) == 0
689 		if (mss > MCLBYTES)
690 			mss &= ~(MCLBYTES-1);
691 #else
692 		if (mss > MCLBYTES)
693 			mss = mss / MCLBYTES * MCLBYTES;
694 #endif
695 		if (so->so_snd.sb_hiwat < mss)
696 			mss = so->so_snd.sb_hiwat;
697 
698 		tp->t_maxseg = mss;
699 
700 		tcpstat.tcps_mturesent++;
701 		tp->t_rtt = 0;
702 		tp->snd_nxt = tp->snd_una;
703 		tcp_output(tp);
704 	}
705 }
706 
707 /*
708  * Look-up the routing entry to the peer of this inpcb.  If no route
709  * is found and one cannot be allocated, NULL is returned.  This routine
710  * is called by TCP routines that access the rmx structure and by tcp_mss
711  * to get the interface MTU.
712  */
713 struct rtentry *
714 tcp_rtlookup(inp)
715 	struct inpcb *inp;
716 {
717 	struct route *ro;
718 	struct rtentry *rt;
719 
720 	ro = &inp->inp_route;
721 	rt = ro->ro_rt;
722 	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
723 		/* No route yet, so try to acquire one */
724 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
725 			ro->ro_dst.sa_family = AF_INET;
726 			ro->ro_dst.sa_len = sizeof(ro->ro_dst);
727 			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
728 				inp->inp_faddr;
729 			rtalloc(ro);
730 			rt = ro->ro_rt;
731 		}
732 	}
733 	return rt;
734 }
735 
736 /*
737  * Return a pointer to the cached information about the remote host.
738  * The cached information is stored in the protocol specific part of
739  * the route metrics.
740  */
741 struct rmxp_tao *
742 tcp_gettaocache(inp)
743 	struct inpcb *inp;
744 {
745 	struct rtentry *rt = tcp_rtlookup(inp);
746 
747 	/* Make sure this is a host route and is up. */
748 	if (rt == NULL ||
749 	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
750 		return NULL;
751 
752 	return rmx_taop(rt->rt_rmx);
753 }
754 
755 /*
756  * Clear all the TAO cache entries, called from tcp_init.
757  *
758  * XXX
759  * This routine is just an empty one, because we assume that the routing
760  * tables are initialized at the same time as TCP, so there is
761  * nothing left over in the cache.
762  */
763 static void
764 tcp_cleartaocache()
765 {
766 }
767