xref: /freebsd/sys/netinet/tcp_timewait.c (revision a8445737e740901f5f2c8d24c12ef7fc8b00134e)
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 *	$Id: tcp_subr.c,v 1.46 1998/08/24 07:47:39 dfr Exp $
 */

#include "opt_compat.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <vm/vm_zone.h>

#include <net/route.h>
#include <net/if.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
	CTLFLAG_RW, &tcp_mssdflt, 0, "");

static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt,
	CTLFLAG_RW, &tcp_rttdflt, 0, "");

static int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323,
	CTLFLAG_RW, &tcp_do_rfc1323, 0, "");

static int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644,
	CTLFLAG_RW, &tcp_do_rfc1644, 0, "");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD, &tcbinfo.ipi_count,
	   0, "Number of active PCBs");

static void	tcp_cleartaocache __P((void));
static void	tcp_notify __P((struct inpcb *, int));

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif
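
/*
 * A power of two lets the PCB lookup code mask the hash value instead
 * of taking a remainder.  A sketch of the bucket computation (the
 * exact hash inputs here are illustrative, not a copy of the in_pcb
 * macros):
 *
 *	hashmask = TCBHASHSIZE - 1;		(512 - 1 == 0x1ff)
 *	bucket   = hash(faddr, ports) & hashmask;
 */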

/*
 * This is the actual shape of what we allocate using the zone
 * allocator.  Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define	ALIGNMENT	32
#define	ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
};
#undef ALIGNMENT
#undef ALIGNM1
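
/*
 * The array size above is the usual round-up-with-a-mask trick.  A
 * worked example (illustrative sizes only): with ALIGNMENT == 32 and
 * a hypothetical sizeof(struct inpcb) of 213,
 *
 *	(213 + 31) & ~31 == 244 & ~31 == 224
 *
 * i.e. the next multiple of 32, so the tcpcb that follows the union
 * always starts on a 32-byte boundary.
 */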

/*
 * TCP initialization.
 */
void
tcp_init()
{

	tcp_iss = random();	/* wrong, but better than a constant */
	tcp_ccgen = 1;
	tcp_cleartaocache();
	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	tcbinfo.hashbase = hashinit(TCBHASHSIZE, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(TCBHASHSIZE, M_PCB,
					&tcbinfo.porthashmask);
	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
				 ZONE_INTERRUPT, 0);
	if (max_protohdr < sizeof(struct tcpiphdr))
		max_protohdr = sizeof(struct tcpiphdr);
	if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
		panic("tcp_init");
}

/*
 * Create a template to be used to send TCP packets on a connection.
 * Called after the host entry is created; allocates an mbuf and fills
 * in a skeletal TCP/IP header, minimizing the amount of work
 * necessary when the connection is used.
 */
struct tcpiphdr *
tcp_template(tp)
	struct tcpcb *tp;
{
	register struct inpcb *inp = tp->t_inpcb;
	register struct mbuf *m;
	register struct tcpiphdr *n;

	if ((n = tp->t_template) == 0) {
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return (0);
		m->m_len = sizeof (struct tcpiphdr);
		n = mtod(m, struct tcpiphdr *);
	}
	bzero(n->ti_x1, sizeof(n->ti_x1));
	n->ti_pr = IPPROTO_TCP;
	n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
	n->ti_src = inp->inp_laddr;
	n->ti_dst = inp->inp_faddr;
	n->ti_sport = inp->inp_lport;
	n->ti_dport = inp->inp_fport;
	n->ti_seq = 0;
	n->ti_ack = 0;
	n->ti_x2 = 0;
	n->ti_off = 5;
	n->ti_flags = 0;
	n->ti_win = 0;
	n->ti_sum = 0;
	n->ti_urp = 0;
	return (n);
}

/*
 * Send a single message to the TCP at the address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keepalive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ti, m, ack, seq, flags)
	struct tcpcb *tp;
	register struct tcpiphdr *ti;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;

	if (tp) {
		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
		ro = &tp->t_inpcb->inp_route;
	} else {
		ro = &sro;
		bzero(ro, sizeof *ro);
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
#ifdef TCP_COMPAT_42
		tlen = 1;
#else
		tlen = 0;
#endif
		m->m_data += max_linkhdr;
		*mtod(m, struct tcpiphdr *) = *ti;
		ti = mtod(m, struct tcpiphdr *);
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ti;
		m->m_len = sizeof (struct tcpiphdr);
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
		xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, n_long);
		xchg(ti->ti_dport, ti->ti_sport, n_short);
#undef xchg
	}
	ti->ti_len = htons((u_short)(sizeof (struct tcphdr) + tlen));
	tlen += sizeof (struct tcpiphdr);
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	bzero(ti->ti_x1, sizeof(ti->ti_x1));
	ti->ti_seq = htonl(seq);
	ti->ti_ack = htonl(ack);
	ti->ti_x2 = 0;
	ti->ti_off = sizeof (struct tcphdr) >> 2;
	ti->ti_flags = flags;
	if (tp)
		ti->ti_win = htons((u_short) (win >> tp->rcv_scale));
	else
		ti->ti_win = htons((u_short)win);
	ti->ti_urp = 0;
	ti->ti_sum = 0;
	ti->ti_sum = in_cksum(m, tlen);
	((struct ip *)ti)->ip_len = tlen;
	((struct ip *)ti)->ip_ttl = ip_defttl;
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, ti, 0);
#endif
	(void) ip_output(m, NULL, ro, 0, NULL);
	if (ro == &sro && ro->ro_rt) {
		RTFREE(ro->ro_rt);
	}
}
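
/*
 * Usage sketch: the keepalive timer is the "m == 0" case described
 * above.  The call below mirrors how the keepalive code in
 * tcp_timer.c probes a peer with the connection's template (treat it
 * as an illustration of the calling convention, not a copy of that
 * code):
 *
 *	tcp_respond(tp, tp->t_template, (struct mbuf *)NULL,
 *	    tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * Sending an already-ACKed sequence number (snd_una - 1) forces the
 * peer to respond with an ACK, proving it is still alive.
 */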

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct inp_tp *it;
	register struct tcpcb *tp;

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero((char *) tp, sizeof(struct tcpcb));
	tp->t_segq = NULL;
	tp->t_maxseg = tp->t_maxopd = tcp_mssdflt;

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives a
	 * reasonable initial retransmit time.
	 */
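	/*
	 * A worked example of the initialization below, assuming the
	 * usual constants of this era (PR_SLOWHZ == 2, so slow timeout
	 * ticks are 500 ms, and TCPTV_RTOBASE == 3 * PR_SLOWHZ == 6
	 * ticks):
	 *
	 *	rttvar (in ticks) = (6 - 0) / 4 = 1.5
	 *	srtt + 4 * rttvar = 0 + 6 ticks = 3 seconds
	 *
	 * which matches the t_rxtcur = TCPTV_RTOBASE assignment, so the
	 * first retransmit on a fresh connection fires after about
	 * three seconds.
	 */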
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = TCPTV_MIN;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If the connection is synchronized,
 * then send a RST to the peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	register struct tcpcb *tp;
{
	register struct mbuf *q;
	register struct mbuf *nq;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	register struct mbuf *m;
	register struct rtentry *rt;
	int dosavessthresh;

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; with fewer samples
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
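	/*
	 * A sketch of the unit conversion used below, assuming the
	 * usual constants of this era (RTM_RTTUNIT == 1000000, i.e.
	 * microseconds; PR_SLOWHZ == 2; TCP_RTT_SCALE == 32):
	 *
	 *	i = t_srtt * (1000000 / (2 * 32)) = t_srtt * 15625
	 *
	 * t_srtt counts 500 ms ticks in 1/32-tick fixed point, so one
	 * unit of t_srtt is 15625 us and i comes out in the
	 * microseconds the routing metrics expect.
	 */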
	if (tp->t_rttupdated >= 16 &&
	    (rt = inp->inp_route.ro_rt) &&
	    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
		register u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
	/* free the reassembly queue, if any */
	for (q = tp->t_segq; q; q = nq) {
		nq = q->m_nextpkt;
		tp->t_segq = nq;
		m_freem(q);
	}
	if (tp->t_template)
		(void) m_free(dtom(tp->t_template));
	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{

}

/*
 * Notify a tcp user of an asynchronous error;
 * store the error as a soft error, but wake up the user
 * (for now, this won't do anything until the user can select
 * for soft errors).
 */
static void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If the connection hasn't completed, has retransmitted several
	 * times, and receives a second error, give up now.  This is
	 * better than waiting a long time to establish a connection
	 * that can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	     (error == EHOSTUNREACH || error == ENETUNREACH ||
	      error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

static int
tcp_pcblist SYSCTL_HANDLER_ARGS
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	splx(s);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	s = splnet();
	for (inp = tcbinfo.listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt)
			inp_list[i++] = inp;
	}
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			bcopy(inp->inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
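
/*
 * Userland consumers read this sysctl as a byte stream: one struct
 * xinpgen, a struct xtcpcb per connection, and a trailing xinpgen
 * whose generation count tells the caller whether the list changed
 * mid-copy.  A minimal sketch (hypothetical, error handling elided):
 *
 *	size_t len;
 *	char *buf;
 *	sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0);
 *	walk the xtcpcb records following (struct xinpgen *)buf
 *
 * This is how netstat(1) can list TCP connections without reading
 * kernel memory directly.
 */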

void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	register struct ip *ip = vip;
	register struct tcphdr *th;
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
		return;
	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip
				       + (IP_VHL_HL(ip->ip_vhl) << 2));
		in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
			cmd, notify);
	} else
		in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
}

/*
 * When a source quench is received, close the congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When a `need fragmentation' ICMP message is received, update our
 * idea of the MSS based on the new value in the route.  Also nudge
 * TCP to send something, since we know the packet we just sent was
 * dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;

	if (tp) {
		rt = tcp_rtlookup(inp);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
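		/*
		 * If MCLBYTES is a power of two, round the MSS down to
		 * a multiple of it cheaply with a mask; otherwise fall
		 * back to division.  E.g. with MCLBYTES == 2048, an mss
		 * of 3976 becomes 3976 & ~2047 == 2048, so full-sized
		 * segments fit exactly in mbuf clusters.
		 */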
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtt = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}

/*
 * Look up the routing entry to the peer of this inpcb.  If no route
 * is found and one cannot be allocated, then return NULL.  This
 * routine is called by TCP routines that access the rmx structure and
 * by tcp_mss to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inp)
	struct inpcb *inp;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inp->inp_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(ro->ro_dst);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inp->inp_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol-specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
	struct inpcb *inp;
{
	struct rtentry *rt = tcp_rtlookup(inp);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there
 * is nothing left over in the cache.
 */
static void
tcp_cleartaocache()
{
}