xref: /freebsd/sys/netinet/tcp_syncache.c (revision f6a4109212fd8fbabc731f07b2dd5c7e07fbec33)
1 /*-
2  * Copyright (c) 2001 Networks Associates Technology, Inc.
3  * All rights reserved.
4  *
5  * This software was developed for the FreeBSD Project by Jonathan Lemon
6  * and NAI Labs, the Security Research Division of Network Associates, Inc.
7  * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
8  * DARPA CHATS research program.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of the author may not be used to endorse or promote
19  *    products derived from this software without specific prior written
20  *    permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $FreeBSD$
35  */
36 
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_ipsec.h"
40 #include "opt_mac.h"
41 #include "opt_tcpdebug.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/sysctl.h>
47 #include <sys/malloc.h>
48 #include <sys/mac.h>
49 #include <sys/mbuf.h>
50 #include <sys/md5.h>
51 #include <sys/proc.h>		/* for proc0 declaration */
52 #include <sys/random.h>
53 #include <sys/socket.h>
54 #include <sys/socketvar.h>
55 
56 #include <net/if.h>
57 #include <net/route.h>
58 
59 #include <netinet/in.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/ip.h>
62 #include <netinet/in_var.h>
63 #include <netinet/in_pcb.h>
64 #include <netinet/ip_var.h>
65 #ifdef INET6
66 #include <netinet/ip6.h>
67 #include <netinet/icmp6.h>
68 #include <netinet6/nd6.h>
69 #include <netinet6/ip6_var.h>
70 #include <netinet6/in6_pcb.h>
71 #endif
72 #include <netinet/tcp.h>
73 #ifdef TCPDEBUG
74 #include <netinet/tcpip.h>
75 #endif
76 #include <netinet/tcp_fsm.h>
77 #include <netinet/tcp_seq.h>
78 #include <netinet/tcp_timer.h>
79 #include <netinet/tcp_var.h>
80 #ifdef TCPDEBUG
81 #include <netinet/tcp_debug.h>
82 #endif
83 #ifdef INET6
84 #include <netinet6/tcp6_var.h>
85 #endif
86 
87 #ifdef IPSEC
88 #include <netinet6/ipsec.h>
89 #ifdef INET6
90 #include <netinet6/ipsec6.h>
91 #endif
92 #endif /*IPSEC*/
93 
94 #ifdef FAST_IPSEC
95 #include <netipsec/ipsec.h>
96 #ifdef INET6
97 #include <netipsec/ipsec6.h>
98 #endif
99 #include <netipsec/key.h>
100 #endif /*FAST_IPSEC*/
101 
102 #include <machine/in_cksum.h>
103 #include <vm/uma.h>
104 
105 static int tcp_syncookies = 1;
106 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
107     &tcp_syncookies, 0,
108     "Use TCP SYN cookies if the syncache overflows");
109 
110 static void	 syncache_drop(struct syncache *, struct syncache_head *);
111 static void	 syncache_free(struct syncache *);
112 static void	 syncache_insert(struct syncache *, struct syncache_head *);
113 struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
114 #ifdef TCPDEBUG
115 static int	 syncache_respond(struct syncache *, struct mbuf *, struct socket *);
116 #else
117 static int	 syncache_respond(struct syncache *, struct mbuf *);
118 #endif
119 static struct 	 socket *syncache_socket(struct syncache *, struct socket *,
120 		    struct mbuf *m);
121 static void	 syncache_timer(void *);
122 static u_int32_t syncookie_generate(struct syncache *);
123 static struct syncache *syncookie_lookup(struct in_conninfo *,
124 		    struct tcphdr *, struct socket *);
125 
126 /*
127  * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
128  * 3 retransmits correspond to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
129  * by then, the odds are that the user has given up attempting to connect.
130  */
131 #define SYNCACHE_MAXREXMTS		3
132 
133 /* Arbitrary values */
134 #define TCP_SYNCACHE_HASHSIZE		512
135 #define TCP_SYNCACHE_BUCKETLIMIT	30
136 
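/*
 * Global syncache state: a hash table of buckets holding pending
 * connection entries, the UMA zone they are allocated from, one
 * retransmit timer queue (and callout) per SYN,ACK retransmission
 * count, and the limits exported through the sysctls below.
 */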
137 struct tcp_syncache {
138 	struct	syncache_head *hashbase;
139 	uma_zone_t zone;
140 	u_int	hashsize;
141 	u_int	hashmask;
142 	u_int	bucket_limit;
143 	u_int	cache_count;
144 	u_int	cache_limit;
145 	u_int	rexmt_limit;
146 	u_int	hash_secret;
147 	TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
148 	struct	callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
149 };
150 static struct tcp_syncache tcp_syncache;
151 
152 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");
153 
154 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
155      &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");
156 
157 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
158      &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");
159 
160 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
161      &tcp_syncache.cache_count, 0, "Current number of entries in syncache");
162 
163 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
164      &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");
165 
166 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
167      &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");
168 
169 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
170 
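/*
 * Bucket selection for the syncache hash table: mix the per-boot
 * hash_secret with the foreign address and the port pair, then mask
 * the result down to the table size.  The secret is intended to keep
 * remote hosts from deliberately colliding their SYNs into a single
 * bucket.
 */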
171 #define SYNCACHE_HASH(inc, mask) 					\
172 	((tcp_syncache.hash_secret ^					\
173 	  (inc)->inc_faddr.s_addr ^					\
174 	  ((inc)->inc_faddr.s_addr >> 16) ^ 				\
175 	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)
176 
177 #define SYNCACHE_HASH6(inc, mask) 					\
178 	((tcp_syncache.hash_secret ^					\
179 	  (inc)->inc6_faddr.s6_addr32[0] ^ 				\
180 	  (inc)->inc6_faddr.s6_addr32[3] ^ 				\
181 	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)
182 
183 #define ENDPTS_EQ(a, b) (						\
184 	(a)->ie_fport == (b)->ie_fport &&				\
185 	(a)->ie_lport == (b)->ie_lport &&				\
186 	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
187 	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
188 )
189 
190 #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
191 
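/*
 * (Re)arm the retransmit state for a syncache entry: record the new
 * retransmit slot and deadline, append the entry to that slot's timer
 * queue, and start the slot's callout if it is not already running.
 * Because every entry in a slot uses the same backoff, appending at
 * the tail keeps each queue sorted by deadline.
 */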
192 #define SYNCACHE_TIMEOUT(sc, slot) do {				\
193 	sc->sc_rxtslot = (slot);					\
194 	sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[(slot)];	\
195 	TAILQ_INSERT_TAIL(&tcp_syncache.timerq[(slot)], sc, sc_timerq);	\
196 	if (!callout_active(&tcp_syncache.tt_timerq[(slot)]))		\
197 		callout_reset(&tcp_syncache.tt_timerq[(slot)],		\
198 		    TCPTV_RTOBASE * tcp_backoff[(slot)],		\
199 		    syncache_timer, (void *)((intptr_t)(slot)));	\
200 } while (0)
201 
202 static void
203 syncache_free(struct syncache *sc)
204 {
205 	if (sc->sc_ipopts)
206 		(void) m_free(sc->sc_ipopts);
207 
208 	uma_zfree(tcp_syncache.zone, sc);
209 }
210 
211 void
212 syncache_init(void)
213 {
214 	int i;
215 
216 	tcp_syncache.cache_count = 0;
217 	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
218 	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
219 	tcp_syncache.cache_limit =
220 	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
221 	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
222 	tcp_syncache.hash_secret = arc4random();
223 
224 	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
225 	    &tcp_syncache.hashsize);
226 	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
227 	    &tcp_syncache.cache_limit);
228 	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
229 	    &tcp_syncache.bucket_limit);
230 	if (!powerof2(tcp_syncache.hashsize)) {
231 		printf("WARNING: syncache hash size is not a power of 2.\n");
232 		tcp_syncache.hashsize = 512;	/* safe default */
233 	}
234 	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;
235 
236 	/* Allocate the hash table. */
237 	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
238 	    tcp_syncache.hashsize * sizeof(struct syncache_head),
239 	    M_SYNCACHE, M_WAITOK);
240 
241 	/* Initialize the hash buckets. */
242 	for (i = 0; i < tcp_syncache.hashsize; i++) {
243 		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
244 		tcp_syncache.hashbase[i].sch_length = 0;
245 	}
246 
247 	/* Initialize the timer queues. */
248 	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
249 		TAILQ_INIT(&tcp_syncache.timerq[i]);
250 		callout_init(&tcp_syncache.tt_timerq[i],
251 			debug_mpsafenet ? CALLOUT_MPSAFE : 0);
252 	}
253 
254 	/*
255 	 * Allocate the syncache entries.  Allow the zone to allocate one
256 	 * more entry than cache limit, so a new entry can bump out an
257 	 * older one.
258 	 */
259 	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
260 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
261 	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
262 	tcp_syncache.cache_limit -= 1;
263 }
264 
265 static void
266 syncache_insert(sc, sch)
267 	struct syncache *sc;
268 	struct syncache_head *sch;
269 {
270 	struct syncache *sc2;
271 	int i;
272 
273 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
274 
275 	/*
276 	 * Make sure that we don't overflow the per-bucket
277 	 * limit or the total cache size limit.
278 	 */
279 	if (sch->sch_length >= tcp_syncache.bucket_limit) {
280 		/*
281 		 * The bucket is full, toss the oldest element.
282 		 */
283 		sc2 = TAILQ_FIRST(&sch->sch_bucket);
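		/*
		 * Stamp the listening tcpcb; syncookie_lookup() will only
		 * accept a returning cookie while this timestamp is fresh.
		 */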
284 		sc2->sc_tp->ts_recent = ticks;
285 		syncache_drop(sc2, sch);
286 		tcpstat.tcps_sc_bucketoverflow++;
287 	} else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
288 		/*
289 		 * The cache is full.  Toss the oldest entry in the
290 		 * entire cache.  This is the front entry in the
291 		 * first non-empty timer queue with the largest
292 		 * timeout value.
293 		 */
294 		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
295 			sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
296 			if (sc2 != NULL)
297 				break;
298 		}
299 		sc2->sc_tp->ts_recent = ticks;
300 		syncache_drop(sc2, NULL);
301 		tcpstat.tcps_sc_cacheoverflow++;
302 	}
303 
304 	/* Initialize the entry's timer. */
305 	SYNCACHE_TIMEOUT(sc, 0);
306 
307 	/* Put it into the bucket. */
308 	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
309 	sch->sch_length++;
310 	tcp_syncache.cache_count++;
311 	tcpstat.tcps_sc_added++;
312 }
313 
314 static void
315 syncache_drop(sc, sch)
316 	struct syncache *sc;
317 	struct syncache_head *sch;
318 {
319 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
320 
321 	if (sch == NULL) {
322 #ifdef INET6
323 		if (sc->sc_inc.inc_isipv6) {
324 			sch = &tcp_syncache.hashbase[
325 			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
326 		} else
327 #endif
328 		{
329 			sch = &tcp_syncache.hashbase[
330 			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
331 		}
332 	}
333 
334 	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
335 	sch->sch_length--;
336 	tcp_syncache.cache_count--;
337 
338 	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
339 	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
340 		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);
341 
342 	syncache_free(sc);
343 }
344 
345 /*
346  * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
347  * If we have retransmitted an entry the maximum number of times, expire it.
348  */
349 static void
350 syncache_timer(xslot)
351 	void *xslot;
352 {
353 	intptr_t slot = (intptr_t)xslot;
354 	struct syncache *sc, *nsc;
355 	struct inpcb *inp;
356 
357 	INP_INFO_WLOCK(&tcbinfo);
358 	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
359 	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
360 		/* XXX can this happen? */
361 		INP_INFO_WUNLOCK(&tcbinfo);
362 		return;
363 	}
364 	callout_deactivate(&tcp_syncache.tt_timerq[slot]);
365 
366 	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
367 	while (nsc != NULL) {
368 		if (ticks < nsc->sc_rxttime)
369 			break;
370 		sc = nsc;
371 		inp = sc->sc_tp->t_inpcb;
372 		if (slot == SYNCACHE_MAXREXMTS ||
373 		    slot >= tcp_syncache.rexmt_limit ||
374 		    inp == NULL || inp->inp_gencnt != sc->sc_inp_gencnt) {
375 			nsc = TAILQ_NEXT(sc, sc_timerq);
376 			syncache_drop(sc, NULL);
377 			tcpstat.tcps_sc_stale++;
378 			continue;
379 		}
380 		/*
381 		 * syncache_respond() may call back into the syncache to
382 		 * modify another entry, so do not obtain the next
383 		 * entry on the timer chain until it has completed.
384 		 */
385 #ifdef TCPDEBUG
386 		(void) syncache_respond(sc, NULL, NULL);
387 #else
388 		(void) syncache_respond(sc, NULL);
389 #endif
390 		nsc = TAILQ_NEXT(sc, sc_timerq);
391 		tcpstat.tcps_sc_retransmitted++;
392 		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
393 		SYNCACHE_TIMEOUT(sc, slot + 1);
394 	}
395 	if (nsc != NULL)
396 		callout_reset(&tcp_syncache.tt_timerq[slot],
397 		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
398 	INP_INFO_WUNLOCK(&tcbinfo);
399 }
400 
401 /*
402  * Find an entry in the syncache.
403  */
404 struct syncache *
405 syncache_lookup(inc, schp)
406 	struct in_conninfo *inc;
407 	struct syncache_head **schp;
408 {
409 	struct syncache *sc;
410 	struct syncache_head *sch;
411 
412 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
413 
414 #ifdef INET6
415 	if (inc->inc_isipv6) {
416 		sch = &tcp_syncache.hashbase[
417 		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
418 		*schp = sch;
419 		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
420 			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
421 				return (sc);
422 		}
423 	} else
424 #endif
425 	{
426 		sch = &tcp_syncache.hashbase[
427 		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
428 		*schp = sch;
429 		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
430 #ifdef INET6
431 			if (sc->sc_inc.inc_isipv6)
432 				continue;
433 #endif
434 			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
435 				return (sc);
436 		}
437 	}
438 	return (NULL);
439 }
440 
441 /*
442  * This function is called when we get a RST for a
443  * non-existent connection, so that we can see if the
444  * connection is in the syn cache.  If it is, zap it.
445  */
446 void
447 syncache_chkrst(inc, th)
448 	struct in_conninfo *inc;
449 	struct tcphdr *th;
450 {
451 	struct syncache *sc;
452 	struct syncache_head *sch;
453 
454 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
455 
456 	sc = syncache_lookup(inc, &sch);
457 	if (sc == NULL)
458 		return;
459 	/*
460 	 * If the RST bit is set, check the sequence number to see
461 	 * if this is a valid reset segment.
462 	 * RFC 793 page 37:
463 	 *   In all states except SYN-SENT, all reset (RST) segments
464 	 *   are validated by checking their SEQ-fields.  A reset is
465 	 *   valid if its sequence number is in the window.
466 	 *
467 	 *   The sequence number in the reset segment is normally an
468 	 * echo of our outgoing acknowledgement numbers, but some hosts
469 	 *   send a reset with the sequence number at the rightmost edge
470 	 *   of our receive window, and we have to handle this case.
471 	 */
472 	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
473 	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
474 		syncache_drop(sc, sch);
475 		tcpstat.tcps_sc_reset++;
476 	}
477 }
478 
479 void
480 syncache_badack(inc)
481 	struct in_conninfo *inc;
482 {
483 	struct syncache *sc;
484 	struct syncache_head *sch;
485 
486 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
487 
488 	sc = syncache_lookup(inc, &sch);
489 	if (sc != NULL) {
490 		syncache_drop(sc, sch);
491 		tcpstat.tcps_sc_badack++;
492 	}
493 }
494 
495 void
496 syncache_unreach(inc, th)
497 	struct in_conninfo *inc;
498 	struct tcphdr *th;
499 {
500 	struct syncache *sc;
501 	struct syncache_head *sch;
502 
503 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
504 
505 	/* we are called at splnet() here */
506 	sc = syncache_lookup(inc, &sch);
507 	if (sc == NULL)
508 		return;
509 
510 	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
511 	if (ntohl(th->th_seq) != sc->sc_iss)
512 		return;
513 
514 	/*
515 	 * If we've retransmitted 3 times and this is our second error,
516 	 * we remove the entry.  Otherwise, we allow it to continue on.
517 	 * This prevents us from incorrectly nuking an entry during a
518 	 * spurious network outage.
519 	 *
520 	 * See tcp_notify().
521 	 */
522 	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
523 		sc->sc_flags |= SCF_UNREACH;
524 		return;
525 	}
526 	syncache_drop(sc, sch);
527 	tcpstat.tcps_sc_unreach++;
528 }
529 
530 /*
531  * Build a new TCP socket structure from a syncache entry.
532  */
533 static struct socket *
534 syncache_socket(sc, lso, m)
535 	struct syncache *sc;
536 	struct socket *lso;
537 	struct mbuf *m;
538 {
539 	struct inpcb *inp = NULL;
540 	struct socket *so;
541 	struct tcpcb *tp;
542 
543 	GIANT_REQUIRED;			/* XXX until socket locking */
544 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
545 
546 	/*
547 	 * Ok, create the full blown connection, and set things up
548 	 * as they would have been set up if we had created the
549 	 * connection when the SYN arrived.  If we can't create
550 	 * the connection, abort it.
551 	 */
552 	so = sonewconn(lso, SS_ISCONNECTED);
553 	if (so == NULL) {
554 		/*
555 		 * Drop the connection; we will send a RST if the peer
556 		 * retransmits the ACK.
557 		 */
558 		tcpstat.tcps_listendrop++;
559 		goto abort2;
560 	}
561 #ifdef MAC
562 	mac_set_socket_peer_from_mbuf(m, so);
563 #endif
564 
565 	inp = sotoinpcb(so);
566 	INP_LOCK(inp);
567 
568 	/*
569 	 * Insert new socket into hash list.
570 	 */
571 	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
572 #ifdef INET6
573 	if (sc->sc_inc.inc_isipv6) {
574 		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
575 	} else {
576 		inp->inp_vflag &= ~INP_IPV6;
577 		inp->inp_vflag |= INP_IPV4;
578 #endif
579 		inp->inp_laddr = sc->sc_inc.inc_laddr;
580 #ifdef INET6
581 	}
582 #endif
583 	inp->inp_lport = sc->sc_inc.inc_lport;
584 	if (in_pcbinshash(inp) != 0) {
585 		/*
586 		 * Undo the assignments above if we failed to
587 		 * put the PCB on the hash lists.
588 		 */
589 #ifdef INET6
590 		if (sc->sc_inc.inc_isipv6)
591 			inp->in6p_laddr = in6addr_any;
592        		else
593 #endif
594 			inp->inp_laddr.s_addr = INADDR_ANY;
595 		inp->inp_lport = 0;
596 		goto abort;
597 	}
598 #ifdef IPSEC
599 	/* copy old policy into new socket's */
600 	if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
601 		printf("syncache_expand: could not copy policy\n");
602 #endif
603 #ifdef FAST_IPSEC
604 	/* copy old policy into new socket's */
605 	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
606 		printf("syncache_expand: could not copy policy\n");
607 #endif
608 #ifdef INET6
609 	if (sc->sc_inc.inc_isipv6) {
610 		struct inpcb *oinp = sotoinpcb(lso);
611 		struct in6_addr laddr6;
612 		struct sockaddr_in6 sin6;
613 		/*
614 		 * Inherit socket options from the listening socket.
615 		 * Note that in6p_inputopts is not (and should not be)
616 		 * copied, since it stores previously received options and is
617 		 * used to detect if each new option is different than the
618 		 * previous one and hence should be passed to a user.
619 		 * If we copied in6p_inputopts, a user would not be able to
620 		 * receive options just after calling the accept system call.
621 		 */
622 		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
623 		if (oinp->in6p_outputopts)
624 			inp->in6p_outputopts =
625 			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
626 
627 		sin6.sin6_family = AF_INET6;
628 		sin6.sin6_len = sizeof(sin6);
629 		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
630 		sin6.sin6_port = sc->sc_inc.inc_fport;
631 		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
632 		laddr6 = inp->in6p_laddr;
633 		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
634 			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
635 		if (in6_pcbconnect(inp, (struct sockaddr *)&sin6, &thread0)) {
636 			inp->in6p_laddr = laddr6;
637 			goto abort;
638 		}
639 	} else
640 #endif
641 	{
642 		struct in_addr laddr;
643 		struct sockaddr_in sin;
644 
645 		inp->inp_options = ip_srcroute();
646 		if (inp->inp_options == NULL) {
647 			inp->inp_options = sc->sc_ipopts;
648 			sc->sc_ipopts = NULL;
649 		}
650 
651 		sin.sin_family = AF_INET;
652 		sin.sin_len = sizeof(sin);
653 		sin.sin_addr = sc->sc_inc.inc_faddr;
654 		sin.sin_port = sc->sc_inc.inc_fport;
655 		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
656 		laddr = inp->inp_laddr;
657 		if (inp->inp_laddr.s_addr == INADDR_ANY)
658 			inp->inp_laddr = sc->sc_inc.inc_laddr;
659 		if (in_pcbconnect(inp, (struct sockaddr *)&sin, &thread0)) {
660 			inp->inp_laddr = laddr;
661 			goto abort;
662 		}
663 	}
664 
665 	tp = intotcpcb(inp);
666 	tp->t_state = TCPS_SYN_RECEIVED;
667 	tp->iss = sc->sc_iss;
668 	tp->irs = sc->sc_irs;
669 	tcp_rcvseqinit(tp);
670 	tcp_sendseqinit(tp);
671 	tp->snd_wl1 = sc->sc_irs;
672 	tp->rcv_up = sc->sc_irs + 1;
673 	tp->rcv_wnd = sc->sc_wnd;
674 	tp->rcv_adv += tp->rcv_wnd;
675 
676 	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
677 	if (sc->sc_flags & SCF_NOOPT)
678 		tp->t_flags |= TF_NOOPT;
679 	if (sc->sc_flags & SCF_WINSCALE) {
680 		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
681 		tp->requested_s_scale = sc->sc_requested_s_scale;
682 		tp->request_r_scale = sc->sc_request_r_scale;
683 	}
684 	if (sc->sc_flags & SCF_TIMESTAMP) {
685 		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
686 		tp->ts_recent = sc->sc_tsrecent;
687 		tp->ts_recent_age = ticks;
688 	}
689 	if (sc->sc_flags & SCF_CC) {
690 		/*
691 		 * Initialization of the tcpcb for transaction;
692 		 *   set SND.WND = SEG.WND,
693 		 *   initialize CCsend and CCrecv.
694 		 */
695 		tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
696 		tp->cc_send = sc->sc_cc_send;
697 		tp->cc_recv = sc->sc_cc_recv;
698 	}
699 #ifdef TCP_SIGNATURE
700 	if (sc->sc_flags & SCF_SIGNATURE)
701 		tp->t_flags |= TF_SIGNATURE;
702 #endif
703 
704 	/*
705 	 * Set up MSS and get cached values from tcp_hostcache.
706 	 * This might overwrite some of the defaults we just set.
707 	 */
708 	tcp_mss(tp, sc->sc_peer_mss);
709 
710 	/*
711 	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
712 	 */
713 	if (sc->sc_rxtslot != 0)
714 		tp->snd_cwnd = tp->t_maxseg;
715 	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);
716 
717 	INP_UNLOCK(inp);
718 
719 	tcpstat.tcps_accepts++;
720 	return (so);
721 
722 abort:
723 	INP_UNLOCK(inp);
724 abort2:
725 	if (so != NULL)
726 		(void) soabort(so);
727 	return (NULL);
728 }
729 
730 /*
731  * This function gets called when we receive an ACK for a
732  * socket in the LISTEN state.  We look up the connection
733  * in the syncache, and if it's there, we pull it out of
734  * the cache and turn it into a full-blown connection in
735  * the SYN-RECEIVED state.
736  */
737 int
738 syncache_expand(inc, th, sop, m)
739 	struct in_conninfo *inc;
740 	struct tcphdr *th;
741 	struct socket **sop;
742 	struct mbuf *m;
743 {
744 	struct syncache *sc;
745 	struct syncache_head *sch;
746 	struct socket *so;
747 
748 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
749 
750 	sc = syncache_lookup(inc, &sch);
751 	if (sc == NULL) {
752 		/*
753 		 * There is no syncache entry, so see if this ACK is
754 		 * a returning syncookie.  To do this, first:
755 		 *  A. See if this socket has had a syncache entry dropped in
756 		 *     the past.  We don't want to accept a bogus syncookie
757  		 *     if we've never received a SYN.
758 		 *  B. check that the syncookie is valid.  If it is, then
759 		 *     cobble up a fake syncache entry, and return.
760 		 */
761 		if (!tcp_syncookies)
762 			return (0);
763 		sc = syncookie_lookup(inc, th, *sop);
764 		if (sc == NULL)
765 			return (0);
766 		sch = NULL;
767 		tcpstat.tcps_sc_recvcookie++;
768 	}
769 
770 	/*
771 	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
772 	 */
773 	if (th->th_ack != sc->sc_iss + 1)
774 		return (0);
775 
776 	so = syncache_socket(sc, *sop, m);
777 	if (so == NULL) {
778 #if 0
779 resetandabort:
780 		/* XXXjlemon check this - is this correct? */
781 		(void) tcp_respond(NULL, m, m, th,
782 		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
783 #endif
784 		m_freem(m);			/* XXX only needed for above */
785 		tcpstat.tcps_sc_aborted++;
786 	} else
787 		tcpstat.tcps_sc_completed++;
788 
789 	if (sch == NULL)
790 		syncache_free(sc);
791 	else
792 		syncache_drop(sc, sch);
793 	*sop = so;
794 	return (1);
795 }
796 
797 /*
798  * Given a LISTEN socket and an inbound SYN request, add
799  * this to the syn cache, and send back a segment:
800  *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
801  * to the source.
802  *
803  * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
804  * Doing so would require that we hold onto the data and deliver it
805  * to the application.  However, if we are the target of a SYN-flood
806  * DoS attack, an attacker could send data which would eventually
807  * consume all available buffer space if it were ACKed.  By not ACKing
808  * the data, we avoid this DoS scenario.
809  */
810 int
811 syncache_add(inc, to, th, sop, m)
812 	struct in_conninfo *inc;
813 	struct tcpopt *to;
814 	struct tcphdr *th;
815 	struct socket **sop;
816 	struct mbuf *m;
817 {
818 	struct tcpcb *tp;
819 	struct socket *so;
820 	struct syncache *sc = NULL;
821 	struct syncache_head *sch;
822 	struct mbuf *ipopts = NULL;
823 	struct rmxp_tao tao;
824 	int i, win;
825 
826 	INP_INFO_WLOCK_ASSERT(&tcbinfo);
827 
828 	so = *sop;
829 	tp = sototcpcb(so);
830 	bzero(&tao, sizeof(tao));
831 
832 	/*
833 	 * Remember the IP options, if any.
834 	 */
835 #ifdef INET6
836 	if (!inc->inc_isipv6)
837 #endif
838 		ipopts = ip_srcroute();
839 
840 	/*
841 	 * See if we already have an entry for this connection.
842 	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
843 	 *
844 	 * XXX
845 	 * should the syncache be re-initialized with the contents
846 	 * of the new SYN here (which may have different options?)
847 	 */
848 	sc = syncache_lookup(inc, &sch);
849 	if (sc != NULL) {
850 		tcpstat.tcps_sc_dupsyn++;
851 		if (ipopts) {
852 			/*
853 			 * If we were remembering a previous source route,
854 			 * forget it and use the new one we've been given.
855 			 */
856 			if (sc->sc_ipopts)
857 				(void) m_free(sc->sc_ipopts);
858 			sc->sc_ipopts = ipopts;
859 		}
860 		/*
861 		 * Update timestamp if present.
862 		 */
863 		if (sc->sc_flags & SCF_TIMESTAMP)
864 			sc->sc_tsrecent = to->to_tsval;
865 		/*
866 		 * PCB may have changed, pick up new values.
867 		 */
868 		sc->sc_tp = tp;
869 		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
870 #ifdef TCPDEBUG
871 		if (syncache_respond(sc, m, so) == 0) {
872 #else
873 		if (syncache_respond(sc, m) == 0) {
874 #endif
875 			/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
876 			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
877 			    sc, sc_timerq);
878 			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
879 		 	tcpstat.tcps_sndacks++;
880 			tcpstat.tcps_sndtotal++;
881 		}
882 		*sop = NULL;
883 		return (1);
884 	}
885 
886 	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
887 	if (sc == NULL) {
888 		/*
889 		 * The zone allocator couldn't provide more entries.
890 		 * Treat this as if the cache was full; drop the oldest
891 		 * entry and insert the new one.
892 		 */
893 		/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
894 		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
895 			sc = TAILQ_FIRST(&tcp_syncache.timerq[i]);
896 			if (sc != NULL)
897 				break;
898 		}
899 		sc->sc_tp->ts_recent = ticks;
900 		syncache_drop(sc, NULL);
901 		tcpstat.tcps_sc_zonefail++;
902 		sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
903 		if (sc == NULL) {
904 			if (ipopts)
905 				(void) m_free(ipopts);
906 			return (0);
907 		}
908 	}
909 
910 	/*
911 	 * Fill in the syncache values.
912 	 */
913 	bzero(sc, sizeof(*sc));
914 	sc->sc_tp = tp;
915 	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
916 	sc->sc_ipopts = ipopts;
917 	sc->sc_inc.inc_fport = inc->inc_fport;
918 	sc->sc_inc.inc_lport = inc->inc_lport;
919 #ifdef INET6
920 	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
921 	if (inc->inc_isipv6) {
922 		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
923 		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
924 	} else
925 #endif
926 	{
927 		sc->sc_inc.inc_faddr = inc->inc_faddr;
928 		sc->sc_inc.inc_laddr = inc->inc_laddr;
929 	}
930 	sc->sc_irs = th->th_seq;
931 	sc->sc_flags = 0;
932 	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
933 	if (tcp_syncookies)
934 		sc->sc_iss = syncookie_generate(sc);
935 	else
936 		sc->sc_iss = arc4random();
937 
938 	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
939 	win = sbspace(&so->so_rcv);
940 	win = imax(win, 0);
941 	win = imin(win, TCP_MAXWIN);
942 	sc->sc_wnd = win;
943 
944 	if (tcp_do_rfc1323) {
945 		/*
946 		 * A timestamp received in a SYN makes
947 		 * it ok to send timestamp requests and replies.
948 		 */
949 		if (to->to_flags & TOF_TS) {
950 			sc->sc_tsrecent = to->to_tsval;
951 			sc->sc_flags |= SCF_TIMESTAMP;
952 		}
953 		if (to->to_flags & TOF_SCALE) {
954 			int wscale = 0;
955 
956 			/* Compute proper scaling value from buffer space */
957 			while (wscale < TCP_MAX_WINSHIFT &&
958 			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
959 				wscale++;
960 			sc->sc_request_r_scale = wscale;
961 			sc->sc_requested_s_scale = to->to_requested_s_scale;
962 			sc->sc_flags |= SCF_WINSCALE;
963 		}
964 	}
965 	if (tcp_do_rfc1644) {
966 		/*
967 		 * A CC or CC.new option received in a SYN makes
968 		 * it ok to send CC in subsequent segments.
969 		 */
970 		if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
971 			sc->sc_cc_recv = to->to_cc;
972 			sc->sc_cc_send = CC_INC(tcp_ccgen);
973 			sc->sc_flags |= SCF_CC;
974 		}
975 	}
976 	if (tp->t_flags & TF_NOOPT)
977 		sc->sc_flags = SCF_NOOPT;
978 #ifdef TCP_SIGNATURE
979 	/*
980 	 * If the listening socket requested TCP digests and the received
981 	 * SYN contains the option, flag this in the syncache so that
982 	 * syncache_respond() will do the right thing with the SYN+ACK.
983 	 * XXX Currently we always record the option by default and will
984 	 * attempt to use it in syncache_respond().
985 	 */
986 	if (to->to_flags & TOF_SIGNATURE)
987 		sc->sc_flags |= SCF_SIGNATURE;
988 #endif
989 
990 	/*
991 	 * XXX
992 	 * We have the option here of not doing TAO (even if the segment
993 	 * qualifies) and instead fall back to a normal 3WHS via the syncache.
994 	 * This allows us to apply synflood protection to TAO-qualifying SYNs
995  * also. However, there should be a heuristic to determine when to
996  * do this, and one is not present at the moment.
997 	 */
998 
999 	/*
1000 	 * Perform TAO test on incoming CC (SEG.CC) option, if any.
1001 	 * - compare SEG.CC against cached CC from the same host, if any.
1002  * - if SEG.CC > cached value, SYN must be new and is accepted
1003 	 *	immediately: save new CC in the cache, mark the socket
1004 	 *	connected, enter ESTABLISHED state, turn on flag to
1005 	 *	send a SYN in the next segment.
1006 	 *	A virtual advertised window is set in rcv_adv to
1007 	 *	initialize SWS prevention.  Then enter normal segment
1008 	 *	processing: drop SYN, process data and FIN.
1009 	 * - otherwise do a normal 3-way handshake.
1010 	 */
1011 	if (tcp_do_rfc1644)
1012 		tcp_hc_gettao(&sc->sc_inc, &tao);
1013 
1014 	if ((to->to_flags & TOF_CC) != 0) {
1015 		if (((tp->t_flags & TF_NOPUSH) != 0) &&
1016 		    sc->sc_flags & SCF_CC && tao.tao_cc != 0 &&
1017 		    CC_GT(to->to_cc, tao.tao_cc)) {
1018 			sc->sc_rxtslot = 0;
1019 			so = syncache_socket(sc, *sop, m);
1020 			if (so != NULL) {
1021 				tao.tao_cc = to->to_cc;
1022 				tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
1023 						 tao.tao_cc, 0);
1024 				*sop = so;
1025 			}
1026 			syncache_free(sc);
1027 			return (so != NULL);
1028 		}
1029 	} else {
1030 		/*
1031 		 * No CC option, but maybe CC.NEW: invalidate cached value.
1032 		 */
1033 		if (tcp_do_rfc1644) {
1034 			tao.tao_cc = 0;
1035 			tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
1036 					 tao.tao_cc, 0);
1037 		}
1038 	}
1039 
1040 	/*
1041 	 * TAO test failed or there was no CC option,
1042 	 *    do a standard 3-way handshake.
1043 	 */
1044 #ifdef TCPDEBUG
1045 	if (syncache_respond(sc, m, so) == 0) {
1046 #else
1047 	if (syncache_respond(sc, m) == 0) {
1048 #endif
1049 		syncache_insert(sc, sch);
1050 		tcpstat.tcps_sndacks++;
1051 		tcpstat.tcps_sndtotal++;
1052 	} else {
1053 		syncache_free(sc);
1054 		tcpstat.tcps_sc_dropped++;
1055 	}
1056 	*sop = NULL;
1057 	return (1);
1058 }
1059 
1060 #ifdef TCPDEBUG
1061 static int
1062 syncache_respond(sc, m, so)
1063 	struct syncache *sc;
1064 	struct mbuf *m;
1065 	struct socket *so;
1066 #else
1067 static int
1068 syncache_respond(sc, m)
1069 	struct syncache *sc;
1070 	struct mbuf *m;
1071 #endif
1072 {
1073 	u_int8_t *optp;
1074 	int optlen, error;
1075 	u_int16_t tlen, hlen, mssopt;
1076 	struct ip *ip = NULL;
1077 	struct tcphdr *th;
1078 	struct inpcb *inp;
1079 #ifdef INET6
1080 	struct ip6_hdr *ip6 = NULL;
1081 #endif
1082 
1083 	hlen =
1084 #ifdef INET6
1085 	       (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
1086 #endif
1087 		sizeof(struct ip);
1088 
1089 	KASSERT((&sc->sc_inc) != NULL, ("syncache_respond with NULL in_conninfo pointer"));
1090 
1091 	/* Determine MSS we advertise to other end of connection */
1092 	mssopt = tcp_mssopt(&sc->sc_inc);
1093 
1094 	/* Compute the size of the TCP options. */
1095 	if (sc->sc_flags & SCF_NOOPT) {
1096 		optlen = 0;
1097 	} else {
1098 		optlen = TCPOLEN_MAXSEG +
1099 		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
1100 		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
1101 		    ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
1102 #ifdef TCP_SIGNATURE
1103 		optlen += (sc->sc_flags & SCF_SIGNATURE) ?
1104 		    TCPOLEN_SIGNATURE + 2 : 0;
1105 #endif
1106 	}
1107 	tlen = hlen + sizeof(struct tcphdr) + optlen;
1108 
1109 	/*
1110 	 * XXX
1111 	 * assume that the entire packet will fit in a header mbuf
1112 	 */
1113 	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));
1114 
1115 	/*
1116 	 * XXX shouldn't this reuse the mbuf if possible ?
1117 	 * Create the IP+TCP header from scratch.
1118 	 */
1119 	if (m)
1120 		m_freem(m);
1121 
1122 	m = m_gethdr(M_DONTWAIT, MT_HEADER);
1123 	if (m == NULL)
1124 		return (ENOBUFS);
1125 	m->m_data += max_linkhdr;
1126 	m->m_len = tlen;
1127 	m->m_pkthdr.len = tlen;
1128 	m->m_pkthdr.rcvif = NULL;
1129 	inp = sc->sc_tp->t_inpcb;
1130 	INP_LOCK(inp);
1131 #ifdef MAC
1132 	mac_create_mbuf_from_socket(inp->inp_socket, m);
1133 #endif
1134 
1135 #ifdef INET6
1136 	if (sc->sc_inc.inc_isipv6) {
1137 		ip6 = mtod(m, struct ip6_hdr *);
1138 		ip6->ip6_vfc = IPV6_VERSION;
1139 		ip6->ip6_nxt = IPPROTO_TCP;
1140 		ip6->ip6_src = sc->sc_inc.inc6_laddr;
1141 		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
1142 		ip6->ip6_plen = htons(tlen - hlen);
1143 		/* ip6_hlim is set after checksum */
1144 		/* ip6_flow = ??? */
1145 
1146 		th = (struct tcphdr *)(ip6 + 1);
1147 	} else
1148 #endif
1149 	{
1150 		ip = mtod(m, struct ip *);
1151 		ip->ip_v = IPVERSION;
1152 		ip->ip_hl = sizeof(struct ip) >> 2;
1153 		ip->ip_len = tlen;
1154 		ip->ip_id = 0;
1155 		ip->ip_off = 0;
1156 		ip->ip_sum = 0;
1157 		ip->ip_p = IPPROTO_TCP;
1158 		ip->ip_src = sc->sc_inc.inc_laddr;
1159 		ip->ip_dst = sc->sc_inc.inc_faddr;
1160 		ip->ip_ttl = inp->inp_ip_ttl;   /* XXX */
1161 		ip->ip_tos = inp->inp_ip_tos;   /* XXX */
1162 
1163 		/*
1164 		 * See if we should do MTU discovery.  Route lookups are
1165 		 * expensive, so we leave the DF bit unset if either:
1166 		 *
1167 		 *	1) path_mtu_discovery is disabled, or
1168 		 *	2) the SCF_UNREACH flag has been set
1169 		 */
1170 		if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
1171 		       ip->ip_off |= IP_DF;
1172 
1173 		th = (struct tcphdr *)(ip + 1);
1174 	}
1175 	th->th_sport = sc->sc_inc.inc_lport;
1176 	th->th_dport = sc->sc_inc.inc_fport;
1177 
1178 	th->th_seq = htonl(sc->sc_iss);
1179 	th->th_ack = htonl(sc->sc_irs + 1);
1180 	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
1181 	th->th_x2 = 0;
1182 	th->th_flags = TH_SYN|TH_ACK;
1183 	th->th_win = htons(sc->sc_wnd);
1184 	th->th_urp = 0;
1185 
1186 	/* Tack on the TCP options. */
1187 	if (optlen != 0) {
1188 		optp = (u_int8_t *)(th + 1);
1189 		*optp++ = TCPOPT_MAXSEG;
1190 		*optp++ = TCPOLEN_MAXSEG;
1191 		*optp++ = (mssopt >> 8) & 0xff;
1192 		*optp++ = mssopt & 0xff;
1193 
1194 		if (sc->sc_flags & SCF_WINSCALE) {
1195 			*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
1196 			    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
1197 			    sc->sc_request_r_scale);
1198 			optp += 4;
1199 		}
1200 
1201 		if (sc->sc_flags & SCF_TIMESTAMP) {
1202 			u_int32_t *lp = (u_int32_t *)(optp);
1203 
1204 			/* Form timestamp option per appendix A of RFC 1323. */
1205 			*lp++ = htonl(TCPOPT_TSTAMP_HDR);
1206 			*lp++ = htonl(ticks);
1207 			*lp   = htonl(sc->sc_tsrecent);
1208 			optp += TCPOLEN_TSTAMP_APPA;
1209 		}
1210 
1211 		/*
1212 		 * Send CC and CC.echo if we received CC from our peer.
1213 		 */
1214 		if (sc->sc_flags & SCF_CC) {
1215 			u_int32_t *lp = (u_int32_t *)(optp);
1216 
1217 			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
1218 			*lp++ = htonl(sc->sc_cc_send);
1219 			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
1220 			*lp   = htonl(sc->sc_cc_recv);
1221 			optp += TCPOLEN_CC_APPA * 2;
1222 		}
1223 
1224 #ifdef TCP_SIGNATURE
1225 		/*
1226 		 * Handle TCP-MD5 passive opener response.
1227 		 */
1228 		if (sc->sc_flags & SCF_SIGNATURE) {
1229 			u_int8_t *bp = optp;
1230 			int i;
1231 
1232 			*bp++ = TCPOPT_SIGNATURE;
1233 			*bp++ = TCPOLEN_SIGNATURE;
1234 			for (i = 0; i < TCP_SIGLEN; i++)
1235 				*bp++ = 0;
1236 			tcp_signature_compute(m, sizeof(struct ip), 0, optlen,
1237 			    optp + 2, IPSEC_DIR_OUTBOUND);
1238 			*bp++ = TCPOPT_NOP;
1239 			*bp++ = TCPOPT_EOL;
1240 			optp += TCPOLEN_SIGNATURE + 2;
1241 		}
1242 #endif /* TCP_SIGNATURE */
1243 	}
1244 
1245 #ifdef INET6
1246 	if (sc->sc_inc.inc_isipv6) {
1247 		th->th_sum = 0;
1248 		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
1249 		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
1250 		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
1251 	} else
1252 #endif
1253 	{
1254 		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1255 		    htons(tlen - hlen + IPPROTO_TCP));
1256 		m->m_pkthdr.csum_flags = CSUM_TCP;
1257 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
1258 #ifdef TCPDEBUG
1259 		/*
1260 		 * Trace.
1261 		 */
1262 		if (so != NULL && so->so_options & SO_DEBUG) {
1263 			struct tcpcb *tp = sototcpcb(so);
1264 			tcp_trace(TA_OUTPUT, tp->t_state, tp,
1265 			    mtod(m, void *), th, 0);
1266 		}
1267 #endif
1268 		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, inp);
1269 	}
1270 	INP_UNLOCK(inp);
1271 	return (error);
1272 }
1273 
1274 /*
1275  * cookie layers:
1276  *
1277  *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
1278  *	| peer iss                                                      |
1279  *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
1280  *	|                     0                       |(A)|             |
1281  * (A): peer mss index
1282  */
1283 
1284 /*
1285  * The values below are chosen to minimize the size of the tcp_secret
1286  * table, as well as providing roughly a 16 second lifetime for the cookie.
1287  */
1288 
1289 #define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
1290 #define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */
1291 
1292 #define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
1293 #define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
1294 #define SYNCOOKIE_TIMEOUT \
1295     (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
1296 #define SYNCOOKIE_DATAMASK 	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
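/*
 * SYNCOOKIE_DATAMASK selects the bits of a decoded cookie that carry
 * real data: the secret-window index in the low SYNCOOKIE_WNDBITS
 * bits and a two-bit tcp_msstab[] index just above them.  Once the
 * peer's ISS and the MD5 hash have been stripped off, any set bit
 * outside this mask marks the cookie as one we could not have
 * generated.
 */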
1297 
1298 static struct {
1299 	u_int32_t	ts_secbits[4];
1300 	u_int		ts_expire;
1301 } tcp_secret[SYNCOOKIE_NSECRETS];
1302 
1303 static int tcp_msstab[] = { 0, 536, 1460, 8960 };
1304 
1305 static MD5_CTX syn_ctx;
1306 
1307 #define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))
1308 
1309 struct md5_add {
1310 	u_int32_t laddr, faddr;
1311 	u_int32_t secbits[4];
1312 	u_int16_t lport, fport;
1313 };
1314 
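/*
 * struct md5_add is hashed as raw bytes by MD5Add(), so its layout
 * must stay free of padding: 2 * 4 + 4 * 4 + 2 * 2 == 28 bytes.
 */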
1315 #ifdef CTASSERT
1316 CTASSERT(sizeof(struct md5_add) == 28);
1317 #endif
1318 
1319 /*
1320  * Consider the problem of a recreated (and retransmitted) cookie.  If the
1321  * original SYN was accepted, the connection is established.  The second
1322  * SYN is inflight, and if it arrives with an ISN that falls within the
1323  * receive window, the connection is killed.
1324  *
1325  * However, since cookies have other problems, this may not be worth
1326  * worrying about.
1327  */
1328 
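/*
 * Construct a SYN cookie to use as our ISS.  The current time selects
 * one of SYNCOOKIE_NSECRETS secrets (regenerated lazily once expired),
 * and the peer's advertised MSS is rounded down to an index into
 * tcp_msstab[].  Both indices are packed into the low bits, and the
 * word is then XORed with the peer's ISS and, outside the window-index
 * bits, with an MD5 hash over the connection's addresses, ports and
 * the chosen secret.
 */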
1329 static u_int32_t
1330 syncookie_generate(struct syncache *sc)
1331 {
1332 	u_int32_t md5_buffer[4];
1333 	u_int32_t data;
1334 	int idx, i;
1335 	struct md5_add add;
1336 
1337 	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */
1338 
1339 	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
1340 	if (tcp_secret[idx].ts_expire < ticks) {
1341 		for (i = 0; i < 4; i++)
1342 			tcp_secret[idx].ts_secbits[i] = arc4random();
1343 		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
1344 	}
1345 	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
1346 		if (tcp_msstab[data] <= sc->sc_peer_mss)
1347 			break;
1348 	data = (data << SYNCOOKIE_WNDBITS) | idx;
1349 	data ^= sc->sc_irs;				/* peer's iss */
1350 	MD5Init(&syn_ctx);
1351 #ifdef INET6
1352 	if (sc->sc_inc.inc_isipv6) {
1353 		MD5Add(sc->sc_inc.inc6_laddr);
1354 		MD5Add(sc->sc_inc.inc6_faddr);
1355 		add.laddr = 0;
1356 		add.faddr = 0;
1357 	} else
1358 #endif
1359 	{
1360 		add.laddr = sc->sc_inc.inc_laddr.s_addr;
1361 		add.faddr = sc->sc_inc.inc_faddr.s_addr;
1362 	}
1363 	add.lport = sc->sc_inc.inc_lport;
1364 	add.fport = sc->sc_inc.inc_fport;
1365 	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
1366 	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
1367 	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
1368 	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
1369 	MD5Add(add);
1370 	MD5Final((u_char *)&md5_buffer, &syn_ctx);
1371 	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
1372 	return (data);
1373 }
1374 
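/*
 * Validate a returning ACK as a possible SYN cookie.  Reject it if the
 * secret window it names has expired or if this listening socket has
 * not dropped a syncache entry recently; otherwise recompute the MD5
 * hash, strip it and the peer's ISS out of the acknowledged sequence
 * number, and check that the leftover bits fit the cookie layout.  On
 * success, build a throwaway syncache entry from the recovered values
 * so syncache_socket() can complete the connection.
 */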
1375 static struct syncache *
1376 syncookie_lookup(inc, th, so)
1377 	struct in_conninfo *inc;
1378 	struct tcphdr *th;
1379 	struct socket *so;
1380 {
1381 	u_int32_t md5_buffer[4];
1382 	struct syncache *sc;
1383 	u_int32_t data;
1384 	int wnd, idx;
1385 	struct md5_add add;
1386 
1387 	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */
1388 
1389 	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
1390 	idx = data & SYNCOOKIE_WNDMASK;
1391 	if (tcp_secret[idx].ts_expire < ticks ||
1392 	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
1393 		return (NULL);
1394 	MD5Init(&syn_ctx);
1395 #ifdef INET6
1396 	if (inc->inc_isipv6) {
1397 		MD5Add(inc->inc6_laddr);
1398 		MD5Add(inc->inc6_faddr);
1399 		add.laddr = 0;
1400 		add.faddr = 0;
1401 	} else
1402 #endif
1403 	{
1404 		add.laddr = inc->inc_laddr.s_addr;
1405 		add.faddr = inc->inc_faddr.s_addr;
1406 	}
1407 	add.lport = inc->inc_lport;
1408 	add.fport = inc->inc_fport;
1409 	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
1410 	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
1411 	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
1412 	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
1413 	MD5Add(add);
1414 	MD5Final((u_char *)&md5_buffer, &syn_ctx);
1415 	data ^= md5_buffer[0];
1416 	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
1417 		return (NULL);
1418 	data = data >> SYNCOOKIE_WNDBITS;
1419 
1420 	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
1421 	if (sc == NULL)
1422 		return (NULL);
1423 	/*
1424 	 * Fill in the syncache values.
1425 	 * XXX duplicate code from syncache_add
1426 	 */
1427 	sc->sc_ipopts = NULL;
1428 	sc->sc_inc.inc_fport = inc->inc_fport;
1429 	sc->sc_inc.inc_lport = inc->inc_lport;
1430 #ifdef INET6
1431 	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
1432 	if (inc->inc_isipv6) {
1433 		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
1434 		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
1435 	} else
1436 #endif
1437 	{
1438 		sc->sc_inc.inc_faddr = inc->inc_faddr;
1439 		sc->sc_inc.inc_laddr = inc->inc_laddr;
1440 	}
1441 	sc->sc_irs = th->th_seq - 1;
1442 	sc->sc_iss = th->th_ack - 1;
1443 	wnd = sbspace(&so->so_rcv);
1444 	wnd = imax(wnd, 0);
1445 	wnd = imin(wnd, TCP_MAXWIN);
1446 	sc->sc_wnd = wnd;
1447 	sc->sc_flags = 0;
1448 	sc->sc_rxtslot = 0;
1449 	sc->sc_peer_mss = tcp_msstab[data];
1450 	return (sc);
1451 }
1452