/*-
 * Copyright (c) 2001 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mac.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCPDEBUG
#include <netinet/tcpip.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <vm/uma.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
#ifdef TCPDEBUG
static int	 syncache_respond(struct syncache *, struct mbuf *, struct socket *);
#else
static int	 syncache_respond(struct syncache *, struct mbuf *);
#endif
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits corresponds to a timeout of (1 + 2 + 4 + 8 == 15) seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

struct tcp_syncache {
	struct	syncache_head *hashbase;
	uma_zone_t zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
	TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
	struct	callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
};
static struct tcp_syncache tcp_syncache;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

#define SYNCACHE_TIMEOUT(sc, slot) do {					\
	sc->sc_rxtslot = (slot);					\
	sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[(slot)];	\
	TAILQ_INSERT_TAIL(&tcp_syncache.timerq[(slot)], sc, sc_timerq);	\
	if (!callout_active(&tcp_syncache.tt_timerq[(slot)]))		\
		callout_reset(&tcp_syncache.tt_timerq[(slot)],		\
		    TCPTV_RTOBASE * tcp_backoff[(slot)],		\
		    syncache_timer, (void *)((intptr_t)(slot)));	\
} while (0)
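
/*
 * Added note on the timer scheme above: there is one queue and one callout
 * per retransmit slot, and every entry in timerq[slot] is appended with the
 * same relative timeout (TCPTV_RTOBASE * tcp_backoff[slot]).  Each queue is
 * therefore sorted by sc_rxttime, so syncache_timer() only has to walk from
 * the head until it finds an entry whose deadline lies in the future.  For
 * example, with the usual tcp_backoff[] table ({ 1, 2, 4, 8, ... }), an
 * entry moved to slot 2 fires 4 * TCPTV_RTOBASE ticks after it is queued.
 */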

static void
syncache_free(struct syncache *sc)
{
	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);

	uma_zfree(tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i],
		    debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than the cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
	tcp_syncache.cache_limit -= 1;
}
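
/*
 * Usage sketch (added commentary, not from the original source): the three
 * CTLFLAG_RDTUN knobs fetched above are boot-time loader tunables, while
 * rexmtlimit and syncookies are CTLFLAG_RW and may be changed at runtime:
 *
 *	# /boot/loader.conf
 *	net.inet.tcp.syncache.hashsize="1024"
 *	net.inet.tcp.syncache.bucketlimit="60"
 *
 *	# at runtime
 *	sysctl net.inet.tcp.syncache.rexmtlimit=2
 *
 * hashsize must be a power of 2, or syncache_init() falls back to 512.
 */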

static void
syncache_insert(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	struct syncache *sc2;
	int i;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc2 != NULL)
				break;
		}
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	SYNCACHE_TIMEOUT(sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}

static void
syncache_drop(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	if (sch == NULL) {
#ifdef INET6
		if (sc->sc_inc.inc_isipv6) {
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else
#endif
		{
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	tcp_syncache.cache_count--;

	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);

	syncache_free(sc);
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 */
static void
syncache_timer(xslot)
	void *xslot;
{
	intptr_t slot = (intptr_t)xslot;
	struct syncache *sc, *nsc;
	struct inpcb *inp;

	INP_INFO_WLOCK(&tcbinfo);
	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
		/* XXX can this happen? */
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	callout_deactivate(&tcp_syncache.tt_timerq[slot]);

	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
	while (nsc != NULL) {
		if (ticks < nsc->sc_rxttime)
			break;
		sc = nsc;
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp == NULL || inp->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache to
		 * modify another entry, so do not obtain the next
		 * entry on the timer chain until it has completed.
		 */
#ifdef TCPDEBUG
		(void) syncache_respond(sc, NULL, NULL);
#else
		(void) syncache_respond(sc, NULL);
#endif
		nsc = TAILQ_NEXT(sc, sc_timerq);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
		SYNCACHE_TIMEOUT(sc, slot + 1);
	}
	if (nsc != NULL)
		callout_reset(&tcp_syncache.tt_timerq[slot],
		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
	INP_INFO_WUNLOCK(&tcbinfo);
}

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(inc, schp)
	struct in_conninfo *inc;
	struct syncache_head **schp;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
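	/*
	 * Worked example (added commentary, illustrative numbers): if the
	 * peer's initial sequence number sc_irs is 1000 and our advertised
	 * window sc_wnd is 5840, then a RST carrying a sequence number in
	 * [1000, 6840] passes the check below and drops the entry; anything
	 * outside that range is silently ignored.
	 */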
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(inc)
	struct in_conninfo *inc;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}
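/*
 * Added note: syncache_chkrst() and syncache_badack() above are driven from
 * the TCP segment input path, while syncache_unreach() is driven by the
 * ICMP error handling code (hence the "bogus ICMP msg" check).  All three
 * expect the caller to hold the tcbinfo write lock, as the
 * INP_INFO_WLOCK_ASSERT()s document.
 */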
/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(sc, lso, m)
	struct syncache *sc;
	struct socket *lso;
	struct mbuf *m;
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	GIANT_REQUIRED;			/* XXX until socket locking */
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort2;
	}
#ifdef MAC
	mac_set_socket_peer_from_mbuf(m, so);
#endif

	inp = sotoinpcb(so);
	INP_LOCK(inp);

	/*
	 * Insert new socket into hash list.
	 */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinshash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef FAST_IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts is not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different from the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = ip_srcroute();
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred)) {
			inp->inp_laddr = laddr;
			goto abort;
		}
	}

	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->requested_s_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_CC) {
		/*
		 * Initialization of the tcpcb for transaction;
		 *   set SND.WND = SEG.WND,
		 *   initialize CCsend and CCrecv.
		 */
		tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
		tp->cc_send = sc->sc_cc_send;
		tp->cc_recv = sc->sc_cc_recv;
	}
#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif

	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxtslot != 0)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	INP_UNLOCK(inp);

	tcpstat.tcps_accepts++;
	return (so);

abort:
	INP_UNLOCK(inp);
abort2:
	if (so != NULL)
		(void) soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(inc, th, sop, m)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop, m);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
#endif
		m_freem(m);			/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else
		tcpstat.tcps_sc_completed++;

	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(inc, to, th, sop, m)
	struct in_conninfo *inc;
	struct tcpopt *to;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	struct rmxp_tao tao;
	int i, win;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	so = *sop;
	tp = sototcpcb(so);
	bzero(&tao, sizeof(tao));

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute();

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
#ifdef TCPDEBUG
		if (syncache_respond(sc, m, so) == 0) {
#else
		if (syncache_respond(sc, m) == 0) {
#endif
			/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		*sop = NULL;
		return (1);
	}

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc != NULL)
				break;
		}
		sc->sc_tp->ts_recent = ticks;
		syncache_drop(sc, NULL);
		tcpstat.tcps_sc_zonefail++;
		sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
		if (sc == NULL) {
			if (ipopts)
				(void) m_free(ipopts);
			return (0);
		}
	}

	/*
	 * Fill in the syncache values.
	 */
	bzero(sc, sizeof(*sc));
	sc->sc_tp = tp;
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	if (tcp_syncookies)
		sc->sc_iss = syncookie_generate(sc);
	else
		sc->sc_iss = arc4random();
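	/*
	 * Note (added commentary): with tcp_syncookies enabled, the ISS
	 * chosen here doubles as a SYN cookie.  If this entry is later
	 * evicted, the peer's ACK of iss + 1 can still be validated and a
	 * fake entry reconstituted by syncookie_lookup() from
	 * syncache_expand().
	 */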
	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
	win = sbspace(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_rfc1644) {
		/*
		 * A CC or CC.new option received in a SYN makes
		 * it ok to send CC in subsequent segments.
		 */
		if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
			sc->sc_cc_recv = to->to_cc;
			sc->sc_cc_send = CC_INC(tcp_ccgen);
			sc->sc_flags |= SCF_CC;
		}
	}
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;
#ifdef TCP_SIGNATURE
	/*
	 * If the listening socket requested TCP digests, and the received
	 * SYN contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags = SCF_SIGNATURE;
#endif

	/*
	 * XXX
	 * We have the option here of not doing TAO (even if the segment
	 * qualifies) and instead fall back to a normal 3WHS via the syncache.
	 * This allows us to apply synflood protection to TAO-qualifying SYNs
	 * also.  However, there should be a heuristic to determine when to
	 * do this, and one is not present at the moment.
	 */

	/*
	 * Perform TAO test on incoming CC (SEG.CC) option, if any.
	 * - compare SEG.CC against cached CC from the same host, if any.
	 * - if SEG.CC > cached value, SYN must be new and is accepted
	 *	immediately: save new CC in the cache, mark the socket
	 *	connected, enter ESTABLISHED state, turn on flag to
	 *	send a SYN in the next segment.
	 *	A virtual advertised window is set in rcv_adv to
	 *	initialize SWS prevention.  Then enter normal segment
	 *	processing: drop SYN, process data and FIN.
	 * - otherwise do a normal 3-way handshake.
	 */
	if (tcp_do_rfc1644)
		tcp_hc_gettao(&sc->sc_inc, &tao);

	if ((to->to_flags & TOF_CC) != 0) {
		if (((tp->t_flags & TF_NOPUSH) != 0) &&
		    sc->sc_flags & SCF_CC && tao.tao_cc != 0 &&
		    CC_GT(to->to_cc, tao.tao_cc)) {
			sc->sc_rxtslot = 0;
			so = syncache_socket(sc, *sop, m);
			if (so != NULL) {
				tao.tao_cc = to->to_cc;
				tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
				    tao.tao_cc, 0);
				*sop = so;
			}
			syncache_free(sc);
			return (so != NULL);
		}
	} else {
		/*
		 * No CC option, but maybe CC.NEW: invalidate cached value.
		 */
		if (tcp_do_rfc1644) {
			tao.tao_cc = 0;
			tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
			    tao.tao_cc, 0);
		}
	}

	/*
	 * TAO test failed or there was no CC option,
	 * do a standard 3-way handshake.
	 */
#ifdef TCPDEBUG
	if (syncache_respond(sc, m, so) == 0) {
#else
	if (syncache_respond(sc, m) == 0) {
#endif
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	*sop = NULL;
	return (1);
}
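/*
 * Added commentary on option sizing in syncache_respond() below: the MSS
 * option is always present (TCPOLEN_MAXSEG == 4 bytes), window scaling
 * adds 4 bytes (a NOP plus the 3-byte option), timestamps add
 * TCPOLEN_TSTAMP_APPA (12) bytes, and T/TCP CC adds TCPOLEN_CC_APPA * 2
 * (16) bytes.  A SYN,ACK answering a SYN that carried MSS, window scale,
 * and timestamps therefore carries 4 + 4 + 12 = 20 bytes of options,
 * keeping th_off aligned on a 32-bit boundary.
 */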
#ifdef TCPDEBUG
static int
syncache_respond(sc, m, so)
	struct syncache *sc;
	struct mbuf *m;
	struct socket *so;
#else
static int
syncache_respond(sc, m)
	struct syncache *sc;
	struct mbuf *m;
#endif
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct tcphdr *th;
	struct inpcb *inp;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

	hlen =
#ifdef INET6
	       (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
#endif
		sizeof(struct ip);

	KASSERT((&sc->sc_inc) != NULL, ("syncache_respond with NULL in_conninfo pointer"));

	/* Determine the MSS we advertise to the other end of the connection */
	mssopt = tcp_mssopt(&sc->sc_inc);

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
#ifdef TCP_SIGNATURE
		optlen += (sc->sc_flags & SCF_SIGNATURE) ?
		    TCPOLEN_SIGNATURE + 2 : 0;
#endif
	}
	tlen = hlen + sizeof(struct tcphdr) + optlen;

	/*
	 * XXX
	 * assume that the entire packet will fit in a header mbuf
	 */
	KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));

	/*
	 * XXX shouldn't this reuse the mbuf if possible ?
	 * Create the IP+TCP header from scratch.
	 */
	if (m)
		m_freem(m);

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
	inp = sc->sc_tp->t_inpcb;
	INP_LOCK(inp);
#ifdef MAC
	mac_create_mbuf_from_socket(inp->inp_socket, m);
#endif

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		/* ip6_flow = ??? */

		th = (struct tcphdr *)(ip6 + 1);
	} else
#endif
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = tlen;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = inp->inp_ip_ttl;	/* XXX */
		ip->ip_tos = inp->inp_ip_tos;	/* XXX */

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
			ip->ip_off |= IP_DF;

		th = (struct tcphdr *)(ip + 1);
	}
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	/* Tack on the TCP options. */
	if (optlen != 0) {
		optp = (u_int8_t *)(th + 1);
		*optp++ = TCPOPT_MAXSEG;
		*optp++ = TCPOLEN_MAXSEG;
		*optp++ = (mssopt >> 8) & 0xff;
		*optp++ = mssopt & 0xff;

		if (sc->sc_flags & SCF_WINSCALE) {
			*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
			    TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
			    sc->sc_request_r_scale);
			optp += 4;
		}

		if (sc->sc_flags & SCF_TIMESTAMP) {
			u_int32_t *lp = (u_int32_t *)(optp);

			/* Form timestamp option per appendix A of RFC 1323. */
			*lp++ = htonl(TCPOPT_TSTAMP_HDR);
			*lp++ = htonl(ticks);
			*lp   = htonl(sc->sc_tsrecent);
			optp += TCPOLEN_TSTAMP_APPA;
		}

		/*
		 * Send CC and CC.echo if we received CC from our peer.
		 */
		if (sc->sc_flags & SCF_CC) {
			u_int32_t *lp = (u_int32_t *)(optp);

			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
			*lp++ = htonl(sc->sc_cc_send);
			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
			*lp   = htonl(sc->sc_cc_recv);
			optp += TCPOLEN_CC_APPA * 2;
		}

#ifdef TCP_SIGNATURE
		/*
		 * Handle TCP-MD5 passive opener response.
		 */
		if (sc->sc_flags & SCF_SIGNATURE) {
			u_int8_t *bp = optp;
			int i;

			*bp++ = TCPOPT_SIGNATURE;
			*bp++ = TCPOLEN_SIGNATURE;
			for (i = 0; i < TCP_SIGLEN; i++)
				*bp++ = 0;
			tcp_signature_compute(m, sizeof(struct ip), 0, optlen,
			    optp + 2, IPSEC_DIR_OUTBOUND);
			*bp++ = TCPOPT_NOP;
			*bp++ = TCPOPT_EOL;
			optp += TCPOLEN_SIGNATURE + 2;
		}
#endif /* TCP_SIGNATURE */
	}

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef TCPDEBUG
		/*
		 * Trace.
		 */
		if (so != NULL && so->so_options & SO_DEBUG) {
			struct tcpcb *tp = sototcpcb(so);
			tcp_trace(TA_OUTPUT, tp->t_state, tp,
			    mtod(m, void *), th, 0);
		}
#endif
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, inp);
	}
	INP_UNLOCK(inp);
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as to provide roughly a 16 second lifetime for the cookie.
 */
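/*
 * Added commentary on the arithmetic: with SYNCOOKIE_WNDBITS == 5 and
 * SYNCOOKIE_TIMESHIFT == 1 below, ((ticks << 1) / hz) advances every half
 * second, so the 5-bit window index cycles through all 32 secrets every
 * 16 seconds.  SYNCOOKIE_TIMEOUT works out to hz * 32 / 2 == 16 * hz
 * ticks, which is what gives a returning cookie its roughly 16 second
 * window of validity.
 */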
#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */

#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

static u_int32_t
syncookie_generate(struct syncache *sc)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	return (data);
}
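/*
 * Added commentary, a worked round trip of the encoding above: suppose the
 * peer's MSS maps to tcp_msstab index 2 and the current secret index is 7.
 * Then data = (2 << 5) | 7 = 0x47, which is XORed with the peer's ISS and
 * with the top 27 bits of the MD5 output to form our ISS.  On return,
 * syncookie_lookup() below XORs (th_ack - 1) with (th_seq - 1) to strip
 * the peer's ISS, recovers idx = 7 from the low 5 bits (which the hash XOR
 * above deliberately left untouched), recomputes the MD5 over the same
 * secret and endpoints, and accepts the cookie only if all bits outside
 * SYNCOOKIE_DATAMASK cancel to zero.
 */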
static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL)
		return (NULL);
	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}