1 /*- 2 * Copyright (c) 2001 McAfee, Inc. 3 * Copyright (c) 2006 Andre Oppermann, Internet Business Solutions AG 4 * All rights reserved. 5 * 6 * This software was developed for the FreeBSD Project by Jonathan Lemon 7 * and McAfee Research, the Security Research Division of McAfee, Inc. under 8 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 9 * DARPA CHATS research program. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * $FreeBSD$ 33 */ 34 35 #include "opt_inet.h" 36 #include "opt_inet6.h" 37 #include "opt_ipsec.h" 38 #include "opt_mac.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/sysctl.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/malloc.h> 47 #include <sys/mbuf.h> 48 #include <sys/md5.h> 49 #include <sys/proc.h> /* for proc0 declaration */ 50 #include <sys/random.h> 51 #include <sys/socket.h> 52 #include <sys/socketvar.h> 53 #include <sys/syslog.h> 54 55 #include <vm/uma.h> 56 57 #include <net/if.h> 58 #include <net/route.h> 59 60 #include <netinet/in.h> 61 #include <netinet/in_systm.h> 62 #include <netinet/ip.h> 63 #include <netinet/in_var.h> 64 #include <netinet/in_pcb.h> 65 #include <netinet/ip_var.h> 66 #include <netinet/ip_options.h> 67 #ifdef INET6 68 #include <netinet/ip6.h> 69 #include <netinet/icmp6.h> 70 #include <netinet6/nd6.h> 71 #include <netinet6/ip6_var.h> 72 #include <netinet6/in6_pcb.h> 73 #endif 74 #include <netinet/tcp.h> 75 #include <netinet/tcp_fsm.h> 76 #include <netinet/tcp_seq.h> 77 #include <netinet/tcp_timer.h> 78 #include <netinet/tcp_var.h> 79 #include <netinet/tcp_syncache.h> 80 #ifdef INET6 81 #include <netinet6/tcp6_var.h> 82 #endif 83 84 #ifdef IPSEC 85 #include <netipsec/ipsec.h> 86 #ifdef INET6 87 #include <netipsec/ipsec6.h> 88 #endif 89 #include <netipsec/key.h> 90 #endif /*IPSEC*/ 91 92 #include <machine/in_cksum.h> 93 94 #include <security/mac/mac_framework.h> 95 96 static int tcp_syncookies = 1; 97 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW, 98 &tcp_syncookies, 0, 99 "Use TCP SYN cookies if the syncache overflows"); 100 101 static int tcp_syncookiesonly = 0; 102 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_RW, 103 &tcp_syncookiesonly, 0, 104 "Use only TCP SYN cookies"); 105 106 #define SYNCOOKIE_SECRET_SIZE 8 /* dwords */ 107 #define SYNCOOKIE_LIFETIME 16 /* seconds */ 108 109 struct syncache { 110 TAILQ_ENTRY(syncache) sc_hash; 111 struct in_conninfo sc_inc; /* addresses */ 112 u_long sc_rxttime; /* retransmit time */ 113 u_int16_t sc_rxmits; /* retransmit counter */ 114 115 u_int32_t sc_tsreflect; /* timestamp to reflect */ 116 u_int32_t sc_ts; /* our timestamp to send */ 117 u_int32_t sc_tsoff; /* ts offset w/ syncookies */ 118 u_int32_t sc_flowlabel; /* IPv6 flowlabel */ 119 tcp_seq sc_irs; /* seq from peer */ 120 tcp_seq sc_iss; /* our ISS */ 121 struct mbuf *sc_ipopts; /* source route */ 122 123 u_int16_t sc_peer_mss; /* peer's MSS */ 124 u_int16_t sc_wnd; /* advertised window */ 125 u_int8_t sc_ip_ttl; /* IPv4 TTL */ 126 u_int8_t sc_ip_tos; /* IPv4 TOS */ 127 u_int8_t sc_requested_s_scale:4, 128 sc_requested_r_scale:4; 129 u_int8_t sc_flags; 130 #define SCF_NOOPT 0x01 /* no TCP options */ 131 #define SCF_WINSCALE 0x02 /* negotiated window scaling */ 132 #define SCF_TIMESTAMP 0x04 /* negotiated timestamps */ 133 /* MSS is implicit */ 134 #define SCF_UNREACH 0x10 /* icmp unreachable received */ 135 #define SCF_SIGNATURE 0x20 /* send MD5 digests */ 136 #define SCF_SACK 0x80 /* send SACK option */ 137 #ifdef MAC 138 struct label *sc_label; /* MAC label reference */ 139 #endif 140 }; 141 142 struct syncache_head { 143 struct mtx sch_mtx; 144 TAILQ_HEAD(sch_head, syncache) sch_bucket; 145 struct callout sch_timer; 146 int sch_nextc; 147 u_int sch_length; 148 u_int sch_oddeven; 149 u_int32_t sch_secbits_odd[SYNCOOKIE_SECRET_SIZE]; 150 u_int32_t sch_secbits_even[SYNCOOKIE_SECRET_SIZE]; 151 u_int sch_reseed; /* time_uptime, seconds */ 152 }; 153 154 static void syncache_drop(struct syncache *, struct syncache_head *); 155 static void syncache_free(struct syncache *); 156 static void syncache_insert(struct syncache *, struct syncache_head *); 157 struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **); 158 static int syncache_respond(struct syncache *); 159 static struct socket *syncache_socket(struct syncache *, struct socket *, 160 struct mbuf *m); 161 static void syncache_timeout(struct syncache *sc, struct syncache_head *sch, 162 int docallout); 163 static void syncache_timer(void *); 164 static void syncookie_generate(struct syncache_head *, struct syncache *, 165 u_int32_t *); 166 static struct syncache 167 *syncookie_lookup(struct in_conninfo *, struct syncache_head *, 168 struct syncache *, struct tcpopt *, struct tcphdr *, 169 struct socket *); 170 171 /* 172 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies. 173 * 3 retransmits corresponds to a timeout of (1 + 2 + 4 + 8 == 15) seconds, 174 * the odds are that the user has given up attempting to connect by then. 175 */ 176 #define SYNCACHE_MAXREXMTS 3 177 178 /* Arbitrary values */ 179 #define TCP_SYNCACHE_HASHSIZE 512 180 #define TCP_SYNCACHE_BUCKETLIMIT 30 181 182 struct tcp_syncache { 183 struct syncache_head *hashbase; 184 uma_zone_t zone; 185 u_int hashsize; 186 u_int hashmask; 187 u_int bucket_limit; 188 u_int cache_count; /* XXX: unprotected */ 189 u_int cache_limit; 190 u_int rexmt_limit; 191 u_int hash_secret; 192 }; 193 static struct tcp_syncache tcp_syncache; 194 195 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache"); 196 197 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN, 198 &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache"); 199 200 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN, 201 &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache"); 202 203 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD, 204 &tcp_syncache.cache_count, 0, "Current number of entries in syncache"); 205 206 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN, 207 &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable"); 208 209 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW, 210 &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions"); 211 212 int tcp_sc_rst_sock_fail = 1; 213 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail, CTLFLAG_RW, 214 &tcp_sc_rst_sock_fail, 0, "Send reset on socket allocation failure"); 215 216 static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache"); 217 218 #define SYNCACHE_HASH(inc, mask) \ 219 ((tcp_syncache.hash_secret ^ \ 220 (inc)->inc_faddr.s_addr ^ \ 221 ((inc)->inc_faddr.s_addr >> 16) ^ \ 222 (inc)->inc_fport ^ (inc)->inc_lport) & mask) 223 224 #define SYNCACHE_HASH6(inc, mask) \ 225 ((tcp_syncache.hash_secret ^ \ 226 (inc)->inc6_faddr.s6_addr32[0] ^ \ 227 (inc)->inc6_faddr.s6_addr32[3] ^ \ 228 (inc)->inc_fport ^ (inc)->inc_lport) & mask) 229 230 #define ENDPTS_EQ(a, b) ( \ 231 (a)->ie_fport == (b)->ie_fport && \ 232 (a)->ie_lport == (b)->ie_lport && \ 233 (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \ 234 (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \ 235 ) 236 237 #define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0) 238 239 #define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx) 240 #define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx) 241 #define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED) 242 243 /* 244 * Requires the syncache entry to be already removed from the bucket list. 245 */ 246 static void 247 syncache_free(struct syncache *sc) 248 { 249 if (sc->sc_ipopts) 250 (void) m_free(sc->sc_ipopts); 251 #ifdef MAC 252 mac_destroy_syncache(&sc->sc_label); 253 #endif 254 255 uma_zfree(tcp_syncache.zone, sc); 256 } 257 258 void 259 syncache_init(void) 260 { 261 int i; 262 263 tcp_syncache.cache_count = 0; 264 tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE; 265 tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT; 266 tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS; 267 tcp_syncache.hash_secret = arc4random(); 268 269 TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize", 270 &tcp_syncache.hashsize); 271 TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit", 272 &tcp_syncache.bucket_limit); 273 if (!powerof2(tcp_syncache.hashsize) || tcp_syncache.hashsize == 0) { 274 printf("WARNING: syncache hash size is not a power of 2.\n"); 275 tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE; 276 } 277 tcp_syncache.hashmask = tcp_syncache.hashsize - 1; 278 279 /* Set limits. */ 280 tcp_syncache.cache_limit = 281 tcp_syncache.hashsize * tcp_syncache.bucket_limit; 282 TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit", 283 &tcp_syncache.cache_limit); 284 285 /* Allocate the hash table. */ 286 MALLOC(tcp_syncache.hashbase, struct syncache_head *, 287 tcp_syncache.hashsize * sizeof(struct syncache_head), 288 M_SYNCACHE, M_WAITOK | M_ZERO); 289 290 /* Initialize the hash buckets. */ 291 for (i = 0; i < tcp_syncache.hashsize; i++) { 292 TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket); 293 mtx_init(&tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head", 294 NULL, MTX_DEF); 295 callout_init_mtx(&tcp_syncache.hashbase[i].sch_timer, 296 &tcp_syncache.hashbase[i].sch_mtx, 0); 297 tcp_syncache.hashbase[i].sch_length = 0; 298 } 299 300 /* Create the syncache entry zone. */ 301 tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache), 302 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 303 uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit); 304 } 305 306 /* 307 * Inserts a syncache entry into the specified bucket row. 308 * Locks and unlocks the syncache_head autonomously. 309 */ 310 static void 311 syncache_insert(struct syncache *sc, struct syncache_head *sch) 312 { 313 struct syncache *sc2; 314 315 SCH_LOCK(sch); 316 317 /* 318 * Make sure that we don't overflow the per-bucket limit. 319 * If the bucket is full, toss the oldest element. 320 */ 321 if (sch->sch_length >= tcp_syncache.bucket_limit) { 322 KASSERT(!TAILQ_EMPTY(&sch->sch_bucket), 323 ("sch->sch_length incorrect")); 324 sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head); 325 syncache_drop(sc2, sch); 326 tcpstat.tcps_sc_bucketoverflow++; 327 } 328 329 /* Put it into the bucket. */ 330 TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash); 331 sch->sch_length++; 332 333 /* Reinitialize the bucket row's timer. */ 334 syncache_timeout(sc, sch, 1); 335 336 SCH_UNLOCK(sch); 337 338 tcp_syncache.cache_count++; 339 tcpstat.tcps_sc_added++; 340 } 341 342 /* 343 * Remove and free entry from syncache bucket row. 344 * Expects locked syncache head. 345 */ 346 static void 347 syncache_drop(struct syncache *sc, struct syncache_head *sch) 348 { 349 350 SCH_LOCK_ASSERT(sch); 351 352 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash); 353 sch->sch_length--; 354 355 syncache_free(sc); 356 tcp_syncache.cache_count--; 357 } 358 359 /* 360 * Engage/reengage time on bucket row. 361 */ 362 static void 363 syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout) 364 { 365 sc->sc_rxttime = ticks + 366 TCPTV_RTOBASE * (tcp_backoff[sc->sc_rxmits]); 367 sc->sc_rxmits++; 368 if (sch->sch_nextc > sc->sc_rxttime) 369 sch->sch_nextc = sc->sc_rxttime; 370 if (!TAILQ_EMPTY(&sch->sch_bucket) && docallout) 371 callout_reset(&sch->sch_timer, sch->sch_nextc - ticks, 372 syncache_timer, (void *)sch); 373 } 374 375 /* 376 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted. 377 * If we have retransmitted an entry the maximum number of times, expire it. 378 * One separate timer for each bucket row. 379 */ 380 static void 381 syncache_timer(void *xsch) 382 { 383 struct syncache_head *sch = (struct syncache_head *)xsch; 384 struct syncache *sc, *nsc; 385 int tick = ticks; 386 char *s; 387 388 /* NB: syncache_head has already been locked by the callout. */ 389 SCH_LOCK_ASSERT(sch); 390 391 TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) { 392 /* 393 * We do not check if the listen socket still exists 394 * and accept the case where the listen socket may be 395 * gone by the time we resend the SYN/ACK. We do 396 * not expect this to happens often. If it does, 397 * then the RST will be sent by the time the remote 398 * host does the SYN/ACK->ACK. 399 */ 400 if (sc->sc_rxttime > tick) { 401 if (sc->sc_rxttime < sch->sch_nextc) 402 sch->sch_nextc = sc->sc_rxttime; 403 continue; 404 } 405 406 if (sc->sc_rxmits > tcp_syncache.rexmt_limit) { 407 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) { 408 log(LOG_DEBUG, "%s; %s: Retransmits exhausted, " 409 "giving up and removing syncache entry\n", 410 s, __func__); 411 free(s, M_TCPLOG); 412 } 413 syncache_drop(sc, sch); 414 tcpstat.tcps_sc_stale++; 415 continue; 416 } 417 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) { 418 log(LOG_DEBUG, "%s; %s: Response timeout, " 419 "retransmitting (%u) SYN|ACK\n", 420 s, __func__, sc->sc_rxmits); 421 free(s, M_TCPLOG); 422 } 423 424 (void) syncache_respond(sc); 425 tcpstat.tcps_sc_retransmitted++; 426 syncache_timeout(sc, sch, 0); 427 } 428 if (!TAILQ_EMPTY(&(sch)->sch_bucket)) 429 callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick, 430 syncache_timer, (void *)(sch)); 431 } 432 433 /* 434 * Find an entry in the syncache. 435 * Returns always with locked syncache_head plus a matching entry or NULL. 436 */ 437 struct syncache * 438 syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp) 439 { 440 struct syncache *sc; 441 struct syncache_head *sch; 442 443 #ifdef INET6 444 if (inc->inc_isipv6) { 445 sch = &tcp_syncache.hashbase[ 446 SYNCACHE_HASH6(inc, tcp_syncache.hashmask)]; 447 *schp = sch; 448 449 SCH_LOCK(sch); 450 451 /* Circle through bucket row to find matching entry. */ 452 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) { 453 if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) 454 return (sc); 455 } 456 } else 457 #endif 458 { 459 sch = &tcp_syncache.hashbase[ 460 SYNCACHE_HASH(inc, tcp_syncache.hashmask)]; 461 *schp = sch; 462 463 SCH_LOCK(sch); 464 465 /* Circle through bucket row to find matching entry. */ 466 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) { 467 #ifdef INET6 468 if (sc->sc_inc.inc_isipv6) 469 continue; 470 #endif 471 if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie)) 472 return (sc); 473 } 474 } 475 SCH_LOCK_ASSERT(*schp); 476 return (NULL); /* always returns with locked sch */ 477 } 478 479 /* 480 * This function is called when we get a RST for a 481 * non-existent connection, so that we can see if the 482 * connection is in the syn cache. If it is, zap it. 483 */ 484 void 485 syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th) 486 { 487 struct syncache *sc; 488 struct syncache_head *sch; 489 char *s = NULL; 490 491 sc = syncache_lookup(inc, &sch); /* returns locked sch */ 492 SCH_LOCK_ASSERT(sch); 493 494 /* 495 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags. 496 * See RFC 793 page 65, section SEGMENT ARRIVES. 497 */ 498 if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) { 499 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 500 log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or " 501 "FIN flag set, segment ignored\n", s, __func__); 502 tcpstat.tcps_badrst++; 503 goto done; 504 } 505 506 /* 507 * No corresponding connection was found in syncache. 508 * If syncookies are enabled and possibly exclusively 509 * used, or we are under memory pressure, a valid RST 510 * may not find a syncache entry. In that case we're 511 * done and no SYN|ACK retransmissions will happen. 512 * Otherwise the the RST was misdirected or spoofed. 513 */ 514 if (sc == NULL) { 515 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 516 log(LOG_DEBUG, "%s; %s: Spurious RST without matching " 517 "syncache entry (possibly syncookie only), " 518 "segment ignored\n", s, __func__); 519 tcpstat.tcps_badrst++; 520 goto done; 521 } 522 523 /* 524 * If the RST bit is set, check the sequence number to see 525 * if this is a valid reset segment. 526 * RFC 793 page 37: 527 * In all states except SYN-SENT, all reset (RST) segments 528 * are validated by checking their SEQ-fields. A reset is 529 * valid if its sequence number is in the window. 530 * 531 * The sequence number in the reset segment is normally an 532 * echo of our outgoing acknowlegement numbers, but some hosts 533 * send a reset with the sequence number at the rightmost edge 534 * of our receive window, and we have to handle this case. 535 */ 536 if (SEQ_GEQ(th->th_seq, sc->sc_irs) && 537 SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) { 538 syncache_drop(sc, sch); 539 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 540 log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, " 541 "connection attempt aborted by remote endpoint\n", 542 s, __func__); 543 tcpstat.tcps_sc_reset++; 544 } else if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 545 log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != IRS %u " 546 "(+WND %u), segment ignored\n", 547 s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd); 548 tcpstat.tcps_badrst++; 549 } 550 551 done: 552 if (s != NULL) 553 free(s, M_TCPLOG); 554 SCH_UNLOCK(sch); 555 } 556 557 void 558 syncache_badack(struct in_conninfo *inc) 559 { 560 struct syncache *sc; 561 struct syncache_head *sch; 562 563 sc = syncache_lookup(inc, &sch); /* returns locked sch */ 564 SCH_LOCK_ASSERT(sch); 565 if (sc != NULL) { 566 syncache_drop(sc, sch); 567 tcpstat.tcps_sc_badack++; 568 } 569 SCH_UNLOCK(sch); 570 } 571 572 void 573 syncache_unreach(struct in_conninfo *inc, struct tcphdr *th) 574 { 575 struct syncache *sc; 576 struct syncache_head *sch; 577 578 sc = syncache_lookup(inc, &sch); /* returns locked sch */ 579 SCH_LOCK_ASSERT(sch); 580 if (sc == NULL) 581 goto done; 582 583 /* If the sequence number != sc_iss, then it's a bogus ICMP msg */ 584 if (ntohl(th->th_seq) != sc->sc_iss) 585 goto done; 586 587 /* 588 * If we've rertransmitted 3 times and this is our second error, 589 * we remove the entry. Otherwise, we allow it to continue on. 590 * This prevents us from incorrectly nuking an entry during a 591 * spurious network outage. 592 * 593 * See tcp_notify(). 594 */ 595 if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) { 596 sc->sc_flags |= SCF_UNREACH; 597 goto done; 598 } 599 syncache_drop(sc, sch); 600 tcpstat.tcps_sc_unreach++; 601 done: 602 SCH_UNLOCK(sch); 603 } 604 605 /* 606 * Build a new TCP socket structure from a syncache entry. 607 */ 608 static struct socket * 609 syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m) 610 { 611 struct inpcb *inp = NULL; 612 struct socket *so; 613 struct tcpcb *tp; 614 char *s; 615 616 INP_INFO_WLOCK_ASSERT(&tcbinfo); 617 618 /* 619 * Ok, create the full blown connection, and set things up 620 * as they would have been set up if we had created the 621 * connection when the SYN arrived. If we can't create 622 * the connection, abort it. 623 */ 624 so = sonewconn(lso, SS_ISCONNECTED); 625 if (so == NULL) { 626 /* 627 * Drop the connection; we will either send a RST or 628 * have the peer retransmit its SYN again after its 629 * RTO and try again. 630 */ 631 tcpstat.tcps_listendrop++; 632 if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) { 633 log(LOG_DEBUG, "%s; %s: Socket create failed " 634 "due to limits or memory shortage\n", 635 s, __func__); 636 free(s, M_TCPLOG); 637 } 638 goto abort2; 639 } 640 #ifdef MAC 641 SOCK_LOCK(so); 642 mac_set_socket_peer_from_mbuf(m, so); 643 SOCK_UNLOCK(so); 644 #endif 645 646 inp = sotoinpcb(so); 647 INP_LOCK(inp); 648 649 /* Insert new socket into PCB hash list. */ 650 inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6; 651 #ifdef INET6 652 if (sc->sc_inc.inc_isipv6) { 653 inp->in6p_laddr = sc->sc_inc.inc6_laddr; 654 } else { 655 inp->inp_vflag &= ~INP_IPV6; 656 inp->inp_vflag |= INP_IPV4; 657 #endif 658 inp->inp_laddr = sc->sc_inc.inc_laddr; 659 #ifdef INET6 660 } 661 #endif 662 inp->inp_lport = sc->sc_inc.inc_lport; 663 if (in_pcbinshash(inp) != 0) { 664 /* 665 * Undo the assignments above if we failed to 666 * put the PCB on the hash lists. 667 */ 668 #ifdef INET6 669 if (sc->sc_inc.inc_isipv6) 670 inp->in6p_laddr = in6addr_any; 671 else 672 #endif 673 inp->inp_laddr.s_addr = INADDR_ANY; 674 inp->inp_lport = 0; 675 goto abort; 676 } 677 #ifdef IPSEC 678 /* Copy old policy into new socket's. */ 679 if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp)) 680 printf("syncache_socket: could not copy policy\n"); 681 #endif 682 #ifdef INET6 683 if (sc->sc_inc.inc_isipv6) { 684 struct inpcb *oinp = sotoinpcb(lso); 685 struct in6_addr laddr6; 686 struct sockaddr_in6 sin6; 687 /* 688 * Inherit socket options from the listening socket. 689 * Note that in6p_inputopts are not (and should not be) 690 * copied, since it stores previously received options and is 691 * used to detect if each new option is different than the 692 * previous one and hence should be passed to a user. 693 * If we copied in6p_inputopts, a user would not be able to 694 * receive options just after calling the accept system call. 695 */ 696 inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS; 697 if (oinp->in6p_outputopts) 698 inp->in6p_outputopts = 699 ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT); 700 701 sin6.sin6_family = AF_INET6; 702 sin6.sin6_len = sizeof(sin6); 703 sin6.sin6_addr = sc->sc_inc.inc6_faddr; 704 sin6.sin6_port = sc->sc_inc.inc_fport; 705 sin6.sin6_flowinfo = sin6.sin6_scope_id = 0; 706 laddr6 = inp->in6p_laddr; 707 if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) 708 inp->in6p_laddr = sc->sc_inc.inc6_laddr; 709 if (in6_pcbconnect(inp, (struct sockaddr *)&sin6, 710 thread0.td_ucred)) { 711 inp->in6p_laddr = laddr6; 712 goto abort; 713 } 714 /* Override flowlabel from in6_pcbconnect. */ 715 inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK; 716 inp->in6p_flowinfo |= sc->sc_flowlabel; 717 } else 718 #endif 719 { 720 struct in_addr laddr; 721 struct sockaddr_in sin; 722 723 inp->inp_options = ip_srcroute(m); 724 if (inp->inp_options == NULL) { 725 inp->inp_options = sc->sc_ipopts; 726 sc->sc_ipopts = NULL; 727 } 728 729 sin.sin_family = AF_INET; 730 sin.sin_len = sizeof(sin); 731 sin.sin_addr = sc->sc_inc.inc_faddr; 732 sin.sin_port = sc->sc_inc.inc_fport; 733 bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero)); 734 laddr = inp->inp_laddr; 735 if (inp->inp_laddr.s_addr == INADDR_ANY) 736 inp->inp_laddr = sc->sc_inc.inc_laddr; 737 if (in_pcbconnect(inp, (struct sockaddr *)&sin, 738 thread0.td_ucred)) { 739 inp->inp_laddr = laddr; 740 goto abort; 741 } 742 } 743 tp = intotcpcb(inp); 744 tp->t_state = TCPS_SYN_RECEIVED; 745 tp->iss = sc->sc_iss; 746 tp->irs = sc->sc_irs; 747 tcp_rcvseqinit(tp); 748 tcp_sendseqinit(tp); 749 tp->snd_wl1 = sc->sc_irs; 750 tp->snd_max = tp->iss + 1; 751 tp->snd_nxt = tp->iss + 1; 752 tp->rcv_up = sc->sc_irs + 1; 753 tp->rcv_wnd = sc->sc_wnd; 754 tp->rcv_adv += tp->rcv_wnd; 755 tp->last_ack_sent = tp->rcv_nxt; 756 757 tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY); 758 if (sc->sc_flags & SCF_NOOPT) 759 tp->t_flags |= TF_NOOPT; 760 else { 761 if (sc->sc_flags & SCF_WINSCALE) { 762 tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE; 763 tp->snd_scale = sc->sc_requested_s_scale; 764 tp->request_r_scale = sc->sc_requested_r_scale; 765 } 766 if (sc->sc_flags & SCF_TIMESTAMP) { 767 tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP; 768 tp->ts_recent = sc->sc_tsreflect; 769 tp->ts_recent_age = ticks; 770 tp->ts_offset = sc->sc_tsoff; 771 } 772 #ifdef TCP_SIGNATURE 773 if (sc->sc_flags & SCF_SIGNATURE) 774 tp->t_flags |= TF_SIGNATURE; 775 #endif 776 if (sc->sc_flags & SCF_SACK) 777 tp->t_flags |= TF_SACK_PERMIT; 778 } 779 780 /* 781 * Set up MSS and get cached values from tcp_hostcache. 782 * This might overwrite some of the defaults we just set. 783 */ 784 tcp_mss(tp, sc->sc_peer_mss); 785 786 /* 787 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment. 788 */ 789 if (sc->sc_rxmits) 790 tp->snd_cwnd = tp->t_maxseg; 791 tcp_timer_activate(tp, TT_KEEP, tcp_keepinit); 792 793 INP_UNLOCK(inp); 794 795 tcpstat.tcps_accepts++; 796 return (so); 797 798 abort: 799 INP_UNLOCK(inp); 800 abort2: 801 if (so != NULL) 802 soabort(so); 803 return (NULL); 804 } 805 806 /* 807 * This function gets called when we receive an ACK for a 808 * socket in the LISTEN state. We look up the connection 809 * in the syncache, and if its there, we pull it out of 810 * the cache and turn it into a full-blown connection in 811 * the SYN-RECEIVED state. 812 */ 813 int 814 syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th, 815 struct socket **lsop, struct mbuf *m) 816 { 817 struct syncache *sc; 818 struct syncache_head *sch; 819 struct syncache scs; 820 char *s; 821 822 /* 823 * Global TCP locks are held because we manipulate the PCB lists 824 * and create a new socket. 825 */ 826 INP_INFO_WLOCK_ASSERT(&tcbinfo); 827 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK, 828 ("%s: can handle only ACK", __func__)); 829 830 sc = syncache_lookup(inc, &sch); /* returns locked sch */ 831 SCH_LOCK_ASSERT(sch); 832 if (sc == NULL) { 833 /* 834 * There is no syncache entry, so see if this ACK is 835 * a returning syncookie. To do this, first: 836 * A. See if this socket has had a syncache entry dropped in 837 * the past. We don't want to accept a bogus syncookie 838 * if we've never received a SYN. 839 * B. check that the syncookie is valid. If it is, then 840 * cobble up a fake syncache entry, and return. 841 */ 842 if (!tcp_syncookies) { 843 SCH_UNLOCK(sch); 844 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 845 log(LOG_DEBUG, "%s; %s: Spurious ACK, " 846 "segment rejected (syncookies disabled)\n", 847 s, __func__); 848 goto failed; 849 } 850 bzero(&scs, sizeof(scs)); 851 sc = syncookie_lookup(inc, sch, &scs, to, th, *lsop); 852 SCH_UNLOCK(sch); 853 if (sc == NULL) { 854 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 855 log(LOG_DEBUG, "%s; %s: Segment failed " 856 "SYNCOOKIE authentication, segment rejected " 857 "(probably spoofed)\n", s, __func__); 858 goto failed; 859 } 860 } else { 861 /* Pull out the entry to unlock the bucket row. */ 862 TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash); 863 sch->sch_length--; 864 tcp_syncache.cache_count--; 865 SCH_UNLOCK(sch); 866 } 867 868 /* 869 * Segment validation: 870 * ACK must match our initial sequence number + 1 (the SYN|ACK). 871 */ 872 if (th->th_ack != sc->sc_iss + 1) { 873 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 874 log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment " 875 "rejected\n", s, __func__, th->th_ack, sc->sc_iss); 876 goto failed; 877 } 878 /* 879 * The SEQ must match the received initial receive sequence 880 * number + 1 (the SYN) because we didn't ACK any data that 881 * may have come with the SYN. 882 */ 883 if (th->th_seq != sc->sc_irs + 1) { 884 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 885 log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment " 886 "rejected\n", s, __func__, th->th_seq, sc->sc_irs); 887 goto failed; 888 } 889 /* 890 * If timestamps were present in the SYN and we accepted 891 * them in our SYN|ACK we require them to be present from 892 * now on. And vice versa. 893 */ 894 if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) { 895 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 896 log(LOG_DEBUG, "%s; %s: Timestamp missing, " 897 "segment rejected\n", s, __func__); 898 goto failed; 899 } 900 if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) { 901 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 902 log(LOG_DEBUG, "%s; %s: Timestamp not expected, " 903 "segment rejected\n", s, __func__); 904 goto failed; 905 } 906 /* 907 * If timestamps were negotiated the reflected timestamp 908 * must be equal to what we actually sent in the SYN|ACK. 909 */ 910 if ((to->to_flags & TOF_TS) && to->to_tsecr != sc->sc_ts) { 911 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) 912 log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, " 913 "segment rejected\n", 914 s, __func__, to->to_tsecr, sc->sc_ts); 915 goto failed; 916 } 917 918 *lsop = syncache_socket(sc, *lsop, m); 919 920 if (*lsop == NULL) 921 tcpstat.tcps_sc_aborted++; 922 else 923 tcpstat.tcps_sc_completed++; 924 925 if (sc != &scs) 926 syncache_free(sc); 927 return (1); 928 failed: 929 if (sc != NULL && sc != &scs) 930 syncache_free(sc); 931 if (s != NULL) 932 free(s, M_TCPLOG); 933 *lsop = NULL; 934 return (0); 935 } 936 937 /* 938 * Given a LISTEN socket and an inbound SYN request, add 939 * this to the syn cache, and send back a segment: 940 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> 941 * to the source. 942 * 943 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN. 944 * Doing so would require that we hold onto the data and deliver it 945 * to the application. However, if we are the target of a SYN-flood 946 * DoS attack, an attacker could send data which would eventually 947 * consume all available buffer space if it were ACKed. By not ACKing 948 * the data, we avoid this DoS scenario. 949 */ 950 void 951 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th, 952 struct inpcb *inp, struct socket **lsop, struct mbuf *m) 953 { 954 struct tcpcb *tp; 955 struct socket *so; 956 struct syncache *sc = NULL; 957 struct syncache_head *sch; 958 struct mbuf *ipopts = NULL; 959 u_int32_t flowtmp; 960 int win, sb_hiwat, ip_ttl, ip_tos, noopt; 961 char *s; 962 #ifdef INET6 963 int autoflowlabel = 0; 964 #endif 965 #ifdef MAC 966 struct label *maclabel; 967 #endif 968 struct syncache scs; 969 970 INP_INFO_WLOCK_ASSERT(&tcbinfo); 971 INP_LOCK_ASSERT(inp); /* listen socket */ 972 KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN, 973 ("%s: unexpected tcp flags", __func__)); 974 975 /* 976 * Combine all so/tp operations very early to drop the INP lock as 977 * soon as possible. 978 */ 979 so = *lsop; 980 tp = sototcpcb(so); 981 982 #ifdef INET6 983 if (inc->inc_isipv6 && 984 (inp->in6p_flags & IN6P_AUTOFLOWLABEL)) 985 autoflowlabel = 1; 986 #endif 987 ip_ttl = inp->inp_ip_ttl; 988 ip_tos = inp->inp_ip_tos; 989 win = sbspace(&so->so_rcv); 990 sb_hiwat = so->so_rcv.sb_hiwat; 991 noopt = (tp->t_flags & TF_NOOPT); 992 993 so = NULL; 994 tp = NULL; 995 996 #ifdef MAC 997 if (mac_init_syncache(&maclabel) != 0) { 998 INP_UNLOCK(inp); 999 INP_INFO_WUNLOCK(&tcbinfo); 1000 goto done; 1001 } else 1002 mac_init_syncache_from_inpcb(maclabel, inp); 1003 #endif 1004 INP_UNLOCK(inp); 1005 INP_INFO_WUNLOCK(&tcbinfo); 1006 1007 /* 1008 * Remember the IP options, if any. 1009 */ 1010 #ifdef INET6 1011 if (!inc->inc_isipv6) 1012 #endif 1013 ipopts = ip_srcroute(m); 1014 1015 /* 1016 * See if we already have an entry for this connection. 1017 * If we do, resend the SYN,ACK, and reset the retransmit timer. 1018 * 1019 * XXX: should the syncache be re-initialized with the contents 1020 * of the new SYN here (which may have different options?) 1021 * 1022 * XXX: We do not check the sequence number to see if this is a 1023 * real retransmit or a new connection attempt. The question is 1024 * how to handle such a case; either ignore it as spoofed, or 1025 * drop the current entry and create a new one? 1026 */ 1027 sc = syncache_lookup(inc, &sch); /* returns locked entry */ 1028 SCH_LOCK_ASSERT(sch); 1029 if (sc != NULL) { 1030 tcpstat.tcps_sc_dupsyn++; 1031 if (ipopts) { 1032 /* 1033 * If we were remembering a previous source route, 1034 * forget it and use the new one we've been given. 1035 */ 1036 if (sc->sc_ipopts) 1037 (void) m_free(sc->sc_ipopts); 1038 sc->sc_ipopts = ipopts; 1039 } 1040 /* 1041 * Update timestamp if present. 1042 */ 1043 if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) 1044 sc->sc_tsreflect = to->to_tsval; 1045 else 1046 sc->sc_flags &= ~SCF_TIMESTAMP; 1047 #ifdef MAC 1048 /* 1049 * Since we have already unconditionally allocated label 1050 * storage, free it up. The syncache entry will already 1051 * have an initialized label we can use. 1052 */ 1053 mac_destroy_syncache(&maclabel); 1054 KASSERT(sc->sc_label != NULL, 1055 ("%s: label not initialized", __func__)); 1056 #endif 1057 /* Retransmit SYN|ACK and reset retransmit count. */ 1058 if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) { 1059 log(LOG_DEBUG, "%s; %s: Received duplicate SYN, " 1060 "resetting timer and retransmitting SYN|ACK\n", 1061 s, __func__); 1062 free(s, M_TCPLOG); 1063 } 1064 if (syncache_respond(sc) == 0) { 1065 sc->sc_rxmits = 0; 1066 syncache_timeout(sc, sch, 1); 1067 tcpstat.tcps_sndacks++; 1068 tcpstat.tcps_sndtotal++; 1069 } 1070 SCH_UNLOCK(sch); 1071 goto done; 1072 } 1073 1074 sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO); 1075 if (sc == NULL) { 1076 /* 1077 * The zone allocator couldn't provide more entries. 1078 * Treat this as if the cache was full; drop the oldest 1079 * entry and insert the new one. 1080 */ 1081 tcpstat.tcps_sc_zonefail++; 1082 if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) 1083 syncache_drop(sc, sch); 1084 sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO); 1085 if (sc == NULL) { 1086 if (tcp_syncookies) { 1087 bzero(&scs, sizeof(scs)); 1088 sc = &scs; 1089 } else { 1090 SCH_UNLOCK(sch); 1091 if (ipopts) 1092 (void) m_free(ipopts); 1093 goto done; 1094 } 1095 } 1096 } 1097 1098 /* 1099 * Fill in the syncache values. 1100 */ 1101 #ifdef MAC 1102 sc->sc_label = maclabel; 1103 #endif 1104 sc->sc_ipopts = ipopts; 1105 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo)); 1106 #ifdef INET6 1107 if (!inc->inc_isipv6) 1108 #endif 1109 { 1110 sc->sc_ip_tos = ip_tos; 1111 sc->sc_ip_ttl = ip_ttl; 1112 } 1113 1114 sc->sc_irs = th->th_seq; 1115 sc->sc_iss = arc4random(); 1116 sc->sc_flags = 0; 1117 sc->sc_flowlabel = 0; 1118 1119 /* 1120 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN]. 1121 * win was derived from socket earlier in the function. 1122 */ 1123 win = imax(win, 0); 1124 win = imin(win, TCP_MAXWIN); 1125 sc->sc_wnd = win; 1126 1127 if (tcp_do_rfc1323) { 1128 /* 1129 * A timestamp received in a SYN makes 1130 * it ok to send timestamp requests and replies. 1131 */ 1132 if (to->to_flags & TOF_TS) { 1133 sc->sc_tsreflect = to->to_tsval; 1134 sc->sc_ts = ticks; 1135 sc->sc_flags |= SCF_TIMESTAMP; 1136 } 1137 if (to->to_flags & TOF_SCALE) { 1138 int wscale = 0; 1139 1140 /* 1141 * Compute proper scaling value from buffer space. 1142 * Leave enough room for the socket buffer to grow 1143 * with auto sizing. This allows us to scale the 1144 * receive buffer over a wide range while not losing 1145 * any efficiency or fine granularity. 1146 * 1147 * RFC1323: The Window field in a SYN (i.e., a <SYN> 1148 * or <SYN,ACK>) segment itself is never scaled. 1149 */ 1150 while (wscale < TCP_MAX_WINSHIFT && 1151 (0x1 << wscale) < tcp_minmss) 1152 wscale++; 1153 sc->sc_requested_r_scale = wscale; 1154 sc->sc_requested_s_scale = to->to_wscale; 1155 sc->sc_flags |= SCF_WINSCALE; 1156 } 1157 } 1158 #ifdef TCP_SIGNATURE 1159 /* 1160 * If listening socket requested TCP digests, and received SYN 1161 * contains the option, flag this in the syncache so that 1162 * syncache_respond() will do the right thing with the SYN+ACK. 1163 * XXX: Currently we always record the option by default and will 1164 * attempt to use it in syncache_respond(). 1165 */ 1166 if (to->to_flags & TOF_SIGNATURE) 1167 sc->sc_flags |= SCF_SIGNATURE; 1168 #endif 1169 if (to->to_flags & TOF_SACK) 1170 sc->sc_flags |= SCF_SACK; 1171 if (to->to_flags & TOF_MSS) 1172 sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */ 1173 if (noopt) 1174 sc->sc_flags |= SCF_NOOPT; 1175 1176 if (tcp_syncookies) { 1177 syncookie_generate(sch, sc, &flowtmp); 1178 #ifdef INET6 1179 if (autoflowlabel) 1180 sc->sc_flowlabel = flowtmp; 1181 #endif 1182 } else { 1183 #ifdef INET6 1184 if (autoflowlabel) 1185 sc->sc_flowlabel = 1186 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); 1187 #endif 1188 } 1189 SCH_UNLOCK(sch); 1190 1191 /* 1192 * Do a standard 3-way handshake. 1193 */ 1194 if (syncache_respond(sc) == 0) { 1195 if (tcp_syncookies && tcp_syncookiesonly && sc != &scs) 1196 syncache_free(sc); 1197 else if (sc != &scs) 1198 syncache_insert(sc, sch); /* locks and unlocks sch */ 1199 tcpstat.tcps_sndacks++; 1200 tcpstat.tcps_sndtotal++; 1201 } else { 1202 if (sc != &scs) 1203 syncache_free(sc); 1204 tcpstat.tcps_sc_dropped++; 1205 } 1206 1207 done: 1208 #ifdef MAC 1209 if (sc == &scs) 1210 mac_destroy_syncache(&maclabel); 1211 #endif 1212 *lsop = NULL; 1213 m_freem(m); 1214 return; 1215 } 1216 1217 static int 1218 syncache_respond(struct syncache *sc) 1219 { 1220 struct ip *ip = NULL; 1221 struct mbuf *m; 1222 struct tcphdr *th; 1223 int optlen, error; 1224 u_int16_t hlen, tlen, mssopt; 1225 struct tcpopt to; 1226 #ifdef INET6 1227 struct ip6_hdr *ip6 = NULL; 1228 #endif 1229 1230 hlen = 1231 #ifdef INET6 1232 (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) : 1233 #endif 1234 sizeof(struct ip); 1235 tlen = hlen + sizeof(struct tcphdr); 1236 1237 /* Determine MSS we advertize to other end of connection. */ 1238 mssopt = tcp_mssopt(&sc->sc_inc); 1239 if (sc->sc_peer_mss) 1240 mssopt = max( min(sc->sc_peer_mss, mssopt), tcp_minmss); 1241 1242 /* XXX: Assume that the entire packet will fit in a header mbuf. */ 1243 KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN, 1244 ("syncache: mbuf too small")); 1245 1246 /* Create the IP+TCP header from scratch. */ 1247 m = m_gethdr(M_DONTWAIT, MT_DATA); 1248 if (m == NULL) 1249 return (ENOBUFS); 1250 #ifdef MAC 1251 mac_create_mbuf_from_syncache(sc->sc_label, m); 1252 #endif 1253 m->m_data += max_linkhdr; 1254 m->m_len = tlen; 1255 m->m_pkthdr.len = tlen; 1256 m->m_pkthdr.rcvif = NULL; 1257 1258 #ifdef INET6 1259 if (sc->sc_inc.inc_isipv6) { 1260 ip6 = mtod(m, struct ip6_hdr *); 1261 ip6->ip6_vfc = IPV6_VERSION; 1262 ip6->ip6_nxt = IPPROTO_TCP; 1263 ip6->ip6_src = sc->sc_inc.inc6_laddr; 1264 ip6->ip6_dst = sc->sc_inc.inc6_faddr; 1265 ip6->ip6_plen = htons(tlen - hlen); 1266 /* ip6_hlim is set after checksum */ 1267 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK; 1268 ip6->ip6_flow |= sc->sc_flowlabel; 1269 1270 th = (struct tcphdr *)(ip6 + 1); 1271 } else 1272 #endif 1273 { 1274 ip = mtod(m, struct ip *); 1275 ip->ip_v = IPVERSION; 1276 ip->ip_hl = sizeof(struct ip) >> 2; 1277 ip->ip_len = tlen; 1278 ip->ip_id = 0; 1279 ip->ip_off = 0; 1280 ip->ip_sum = 0; 1281 ip->ip_p = IPPROTO_TCP; 1282 ip->ip_src = sc->sc_inc.inc_laddr; 1283 ip->ip_dst = sc->sc_inc.inc_faddr; 1284 ip->ip_ttl = sc->sc_ip_ttl; 1285 ip->ip_tos = sc->sc_ip_tos; 1286 1287 /* 1288 * See if we should do MTU discovery. Route lookups are 1289 * expensive, so we will only unset the DF bit if: 1290 * 1291 * 1) path_mtu_discovery is disabled 1292 * 2) the SCF_UNREACH flag has been set 1293 */ 1294 if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0)) 1295 ip->ip_off |= IP_DF; 1296 1297 th = (struct tcphdr *)(ip + 1); 1298 } 1299 th->th_sport = sc->sc_inc.inc_lport; 1300 th->th_dport = sc->sc_inc.inc_fport; 1301 1302 th->th_seq = htonl(sc->sc_iss); 1303 th->th_ack = htonl(sc->sc_irs + 1); 1304 th->th_off = sizeof(struct tcphdr) >> 2; 1305 th->th_x2 = 0; 1306 th->th_flags = TH_SYN|TH_ACK; 1307 th->th_win = htons(sc->sc_wnd); 1308 th->th_urp = 0; 1309 1310 /* Tack on the TCP options. */ 1311 if ((sc->sc_flags & SCF_NOOPT) == 0) { 1312 to.to_flags = 0; 1313 1314 to.to_mss = mssopt; 1315 to.to_flags = TOF_MSS; 1316 if (sc->sc_flags & SCF_WINSCALE) { 1317 to.to_wscale = sc->sc_requested_r_scale; 1318 to.to_flags |= TOF_SCALE; 1319 } 1320 if (sc->sc_flags & SCF_TIMESTAMP) { 1321 /* Virgin timestamp or TCP cookie enhanced one. */ 1322 to.to_tsval = sc->sc_ts; 1323 to.to_tsecr = sc->sc_tsreflect; 1324 to.to_flags |= TOF_TS; 1325 } 1326 if (sc->sc_flags & SCF_SACK) 1327 to.to_flags |= TOF_SACKPERM; 1328 #ifdef TCP_SIGNATURE 1329 if (sc->sc_flags & SCF_SIGNATURE) 1330 to.to_flags |= TOF_SIGNATURE; 1331 #endif 1332 optlen = tcp_addoptions(&to, (u_char *)(th + 1)); 1333 1334 #ifdef TCP_SIGNATURE 1335 tcp_signature_compute(m, sizeof(struct ip), 0, optlen, 1336 to.to_signature, IPSEC_DIR_OUTBOUND); 1337 #endif 1338 1339 /* Adjust headers by option size. */ 1340 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 1341 m->m_len += optlen; 1342 m->m_pkthdr.len += optlen; 1343 #ifdef INET6 1344 if (sc->sc_inc.inc_isipv6) 1345 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen); 1346 else 1347 #endif 1348 ip->ip_len += optlen; 1349 } else 1350 optlen = 0; 1351 1352 #ifdef INET6 1353 if (sc->sc_inc.inc_isipv6) { 1354 th->th_sum = 0; 1355 th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, 1356 tlen + optlen - hlen); 1357 ip6->ip6_hlim = in6_selecthlim(NULL, NULL); 1358 error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); 1359 } else 1360 #endif 1361 { 1362 th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1363 htons(tlen + optlen - hlen + IPPROTO_TCP)); 1364 m->m_pkthdr.csum_flags = CSUM_TCP; 1365 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 1366 error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL); 1367 } 1368 return (error); 1369 } 1370 1371 /* 1372 * The purpose of SYN cookies is to avoid keeping track of all SYN's we 1373 * receive and to be able to handle SYN floods from bogus source addresses 1374 * (where we will never receive any reply). SYN floods try to exhaust all 1375 * our memory and available slots in the SYN cache table to cause a denial 1376 * of service to legitimate users of the local host. 1377 * 1378 * The idea of SYN cookies is to encode and include all necessary information 1379 * about the connection setup state within the SYN-ACK we send back and thus 1380 * to get along without keeping any local state until the ACK to the SYN-ACK 1381 * arrives (if ever). Everything we need to know should be available from 1382 * the information we encoded in the SYN-ACK. 1383 * 1384 * More information about the theory behind SYN cookies and its first 1385 * discussion and specification can be found at: 1386 * http://cr.yp.to/syncookies.html (overview) 1387 * http://cr.yp.to/syncookies/archive (gory details) 1388 * 1389 * This implementation extends the orginal idea and first implementation 1390 * of FreeBSD by using not only the initial sequence number field to store 1391 * information but also the timestamp field if present. This way we can 1392 * keep track of the entire state we need to know to recreate the session in 1393 * its original form. Almost all TCP speakers implement RFC1323 timestamps 1394 * these days. For those that do not we still have to live with the known 1395 * shortcomings of the ISN only SYN cookies. 1396 * 1397 * Cookie layers: 1398 * 1399 * Initial sequence number we send: 1400 * 31|................................|0 1401 * DDDDDDDDDDDDDDDDDDDDDDDDDMMMRRRP 1402 * D = MD5 Digest (first dword) 1403 * M = MSS index 1404 * R = Rotation of secret 1405 * P = Odd or Even secret 1406 * 1407 * The MD5 Digest is computed with over following parameters: 1408 * a) randomly rotated secret 1409 * b) struct in_conninfo containing the remote/local ip/port (IPv4&IPv6) 1410 * c) the received initial sequence number from remote host 1411 * d) the rotation offset and odd/even bit 1412 * 1413 * Timestamp we send: 1414 * 31|................................|0 1415 * DDDDDDDDDDDDDDDDDDDDDDSSSSRRRRA5 1416 * D = MD5 Digest (third dword) (only as filler) 1417 * S = Requested send window scale 1418 * R = Requested receive window scale 1419 * A = SACK allowed 1420 * 5 = TCP-MD5 enabled (not implemented yet) 1421 * XORed with MD5 Digest (forth dword) 1422 * 1423 * The timestamp isn't cryptographically secure and doesn't need to be. 1424 * The double use of the MD5 digest dwords ties it to a specific remote/ 1425 * local host/port, remote initial sequence number and our local time 1426 * limited secret. A received timestamp is reverted (XORed) and then 1427 * the contained MD5 dword is compared to the computed one to ensure the 1428 * timestamp belongs to the SYN-ACK we sent. The other parameters may 1429 * have been tampered with but this isn't different from supplying bogus 1430 * values in the SYN in the first place. 1431 * 1432 * Some problems with SYN cookies remain however: 1433 * Consider the problem of a recreated (and retransmitted) cookie. If the 1434 * original SYN was accepted, the connection is established. The second 1435 * SYN is inflight, and if it arrives with an ISN that falls within the 1436 * receive window, the connection is killed. 1437 * 1438 * Notes: 1439 * A heuristic to determine when to accept syn cookies is not necessary. 1440 * An ACK flood would cause the syncookie verification to be attempted, 1441 * but a SYN flood causes syncookies to be generated. Both are of equal 1442 * cost, so there's no point in trying to optimize the ACK flood case. 1443 * Also, if you don't process certain ACKs for some reason, then all someone 1444 * would have to do is launch a SYN and ACK flood at the same time, which 1445 * would stop cookie verification and defeat the entire purpose of syncookies. 1446 */ 1447 static int tcp_sc_msstab[] = { 0, 256, 468, 536, 996, 1452, 1460, 8960 }; 1448 1449 static void 1450 syncookie_generate(struct syncache_head *sch, struct syncache *sc, 1451 u_int32_t *flowlabel) 1452 { 1453 MD5_CTX ctx; 1454 u_int32_t md5_buffer[MD5_DIGEST_LENGTH / sizeof(u_int32_t)]; 1455 u_int32_t data; 1456 u_int32_t *secbits; 1457 u_int off, pmss, mss; 1458 int i; 1459 1460 SCH_LOCK_ASSERT(sch); 1461 1462 /* Which of the two secrets to use. */ 1463 secbits = sch->sch_oddeven ? 1464 sch->sch_secbits_odd : sch->sch_secbits_even; 1465 1466 /* Reseed secret if too old. */ 1467 if (sch->sch_reseed < time_uptime) { 1468 sch->sch_oddeven = sch->sch_oddeven ? 0 : 1; /* toggle */ 1469 secbits = sch->sch_oddeven ? 1470 sch->sch_secbits_odd : sch->sch_secbits_even; 1471 for (i = 0; i < SYNCOOKIE_SECRET_SIZE; i++) 1472 secbits[i] = arc4random(); 1473 sch->sch_reseed = time_uptime + SYNCOOKIE_LIFETIME; 1474 } 1475 1476 /* Secret rotation offset. */ 1477 off = sc->sc_iss & 0x7; /* iss was randomized before */ 1478 1479 /* Maximum segment size calculation. */ 1480 pmss = max( min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), tcp_minmss); 1481 for (mss = sizeof(tcp_sc_msstab) / sizeof(int) - 1; mss > 0; mss--) 1482 if (tcp_sc_msstab[mss] <= pmss) 1483 break; 1484 1485 /* Fold parameters and MD5 digest into the ISN we will send. */ 1486 data = sch->sch_oddeven;/* odd or even secret, 1 bit */ 1487 data |= off << 1; /* secret offset, derived from iss, 3 bits */ 1488 data |= mss << 4; /* mss, 3 bits */ 1489 1490 MD5Init(&ctx); 1491 MD5Update(&ctx, ((u_int8_t *)secbits) + off, 1492 SYNCOOKIE_SECRET_SIZE * sizeof(*secbits) - off); 1493 MD5Update(&ctx, secbits, off); 1494 MD5Update(&ctx, &sc->sc_inc, sizeof(sc->sc_inc)); 1495 MD5Update(&ctx, &sc->sc_irs, sizeof(sc->sc_irs)); 1496 MD5Update(&ctx, &data, sizeof(data)); 1497 MD5Final((u_int8_t *)&md5_buffer, &ctx); 1498 1499 data |= (md5_buffer[0] << 7); 1500 sc->sc_iss = data; 1501 1502 #ifdef INET6 1503 *flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK; 1504 #endif 1505 1506 /* Additional parameters are stored in the timestamp if present. */ 1507 if (sc->sc_flags & SCF_TIMESTAMP) { 1508 data = ((sc->sc_flags & SCF_SIGNATURE) ? 1 : 0); /* TCP-MD5, 1 bit */ 1509 data |= ((sc->sc_flags & SCF_SACK) ? 1 : 0) << 1; /* SACK, 1 bit */ 1510 data |= sc->sc_requested_s_scale << 2; /* SWIN scale, 4 bits */ 1511 data |= sc->sc_requested_r_scale << 6; /* RWIN scale, 4 bits */ 1512 data |= md5_buffer[2] << 10; /* more digest bits */ 1513 data ^= md5_buffer[3]; 1514 sc->sc_ts = data; 1515 sc->sc_tsoff = data - ticks; /* after XOR */ 1516 } 1517 1518 tcpstat.tcps_sc_sendcookie++; 1519 return; 1520 } 1521 1522 static struct syncache * 1523 syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch, 1524 struct syncache *sc, struct tcpopt *to, struct tcphdr *th, 1525 struct socket *so) 1526 { 1527 MD5_CTX ctx; 1528 u_int32_t md5_buffer[MD5_DIGEST_LENGTH / sizeof(u_int32_t)]; 1529 u_int32_t data = 0; 1530 u_int32_t *secbits; 1531 tcp_seq ack, seq; 1532 int off, mss, wnd, flags; 1533 1534 SCH_LOCK_ASSERT(sch); 1535 1536 /* 1537 * Pull information out of SYN-ACK/ACK and 1538 * revert sequence number advances. 1539 */ 1540 ack = th->th_ack - 1; 1541 seq = th->th_seq - 1; 1542 off = (ack >> 1) & 0x7; 1543 mss = (ack >> 4) & 0x7; 1544 flags = ack & 0x7f; 1545 1546 /* Which of the two secrets to use. */ 1547 secbits = (flags & 0x1) ? sch->sch_secbits_odd : sch->sch_secbits_even; 1548 1549 /* 1550 * The secret wasn't updated for the lifetime of a syncookie, 1551 * so this SYN-ACK/ACK is either too old (replay) or totally bogus. 1552 */ 1553 if (sch->sch_reseed < time_uptime) { 1554 return (NULL); 1555 } 1556 1557 /* Recompute the digest so we can compare it. */ 1558 MD5Init(&ctx); 1559 MD5Update(&ctx, ((u_int8_t *)secbits) + off, 1560 SYNCOOKIE_SECRET_SIZE * sizeof(*secbits) - off); 1561 MD5Update(&ctx, secbits, off); 1562 MD5Update(&ctx, inc, sizeof(*inc)); 1563 MD5Update(&ctx, &seq, sizeof(seq)); 1564 MD5Update(&ctx, &flags, sizeof(flags)); 1565 MD5Final((u_int8_t *)&md5_buffer, &ctx); 1566 1567 /* Does the digest part of or ACK'ed ISS match? */ 1568 if ((ack & (~0x7f)) != (md5_buffer[0] << 7)) 1569 return (NULL); 1570 1571 /* Does the digest part of our reflected timestamp match? */ 1572 if (to->to_flags & TOF_TS) { 1573 data = md5_buffer[3] ^ to->to_tsecr; 1574 if ((data & (~0x3ff)) != (md5_buffer[2] << 10)) 1575 return (NULL); 1576 } 1577 1578 /* Fill in the syncache values. */ 1579 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo)); 1580 sc->sc_ipopts = NULL; 1581 1582 sc->sc_irs = seq; 1583 sc->sc_iss = ack; 1584 1585 #ifdef INET6 1586 if (inc->inc_isipv6) { 1587 if (sotoinpcb(so)->in6p_flags & IN6P_AUTOFLOWLABEL) 1588 sc->sc_flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK; 1589 } else 1590 #endif 1591 { 1592 sc->sc_ip_ttl = sotoinpcb(so)->inp_ip_ttl; 1593 sc->sc_ip_tos = sotoinpcb(so)->inp_ip_tos; 1594 } 1595 1596 /* Additional parameters that were encoded in the timestamp. */ 1597 if (data) { 1598 sc->sc_flags |= SCF_TIMESTAMP; 1599 sc->sc_tsreflect = to->to_tsval; 1600 sc->sc_ts = to->to_tsecr; 1601 sc->sc_tsoff = to->to_tsecr - ticks; 1602 sc->sc_flags |= (data & 0x1) ? SCF_SIGNATURE : 0; 1603 sc->sc_flags |= ((data >> 1) & 0x1) ? SCF_SACK : 0; 1604 sc->sc_requested_s_scale = min((data >> 2) & 0xf, 1605 TCP_MAX_WINSHIFT); 1606 sc->sc_requested_r_scale = min((data >> 6) & 0xf, 1607 TCP_MAX_WINSHIFT); 1608 if (sc->sc_requested_s_scale || sc->sc_requested_r_scale) 1609 sc->sc_flags |= SCF_WINSCALE; 1610 } else 1611 sc->sc_flags |= SCF_NOOPT; 1612 1613 wnd = sbspace(&so->so_rcv); 1614 wnd = imax(wnd, 0); 1615 wnd = imin(wnd, TCP_MAXWIN); 1616 sc->sc_wnd = wnd; 1617 1618 sc->sc_rxmits = 0; 1619 sc->sc_peer_mss = tcp_sc_msstab[mss]; 1620 1621 tcpstat.tcps_sc_recvcookie++; 1622 return (sc); 1623 } 1624 1625 /* 1626 * Returns the current number of syncache entries. This number 1627 * will probably change before you get around to calling 1628 * syncache_pcblist. 1629 */ 1630 1631 int 1632 syncache_pcbcount(void) 1633 { 1634 struct syncache_head *sch; 1635 int count, i; 1636 1637 for (count = 0, i = 0; i < tcp_syncache.hashsize; i++) { 1638 /* No need to lock for a read. */ 1639 sch = &tcp_syncache.hashbase[i]; 1640 count += sch->sch_length; 1641 } 1642 return count; 1643 } 1644 1645 /* 1646 * Exports the syncache entries to userland so that netstat can display 1647 * them alongside the other sockets. This function is intended to be 1648 * called only from tcp_pcblist. 1649 * 1650 * Due to concurrency on an active system, the number of pcbs exported 1651 * may have no relation to max_pcbs. max_pcbs merely indicates the 1652 * amount of space the caller allocated for this function to use. 1653 */ 1654 int 1655 syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported) 1656 { 1657 struct xtcpcb xt; 1658 struct syncache *sc; 1659 struct syncache_head *sch; 1660 int count, error, i; 1661 1662 for (count = 0, error = 0, i = 0; i < tcp_syncache.hashsize; i++) { 1663 sch = &tcp_syncache.hashbase[i]; 1664 SCH_LOCK(sch); 1665 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) { 1666 if (count >= max_pcbs) { 1667 SCH_UNLOCK(sch); 1668 goto exit; 1669 } 1670 bzero(&xt, sizeof(xt)); 1671 xt.xt_len = sizeof(xt); 1672 if (sc->sc_inc.inc_isipv6) 1673 xt.xt_inp.inp_vflag = INP_IPV6; 1674 else 1675 xt.xt_inp.inp_vflag = INP_IPV4; 1676 bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc, sizeof (struct in_conninfo)); 1677 xt.xt_tp.t_inpcb = &xt.xt_inp; 1678 xt.xt_tp.t_state = TCPS_SYN_RECEIVED; 1679 xt.xt_socket.xso_protocol = IPPROTO_TCP; 1680 xt.xt_socket.xso_len = sizeof (struct xsocket); 1681 xt.xt_socket.so_type = SOCK_STREAM; 1682 xt.xt_socket.so_state = SS_ISCONNECTING; 1683 error = SYSCTL_OUT(req, &xt, sizeof xt); 1684 if (error) { 1685 SCH_UNLOCK(sch); 1686 goto exit; 1687 } 1688 count++; 1689 } 1690 SCH_UNLOCK(sch); 1691 } 1692 exit: 1693 *pcbs_exported = count; 1694 return error; 1695 } 1696 1697