/*-
 * Copyright (c) 2001 McAfee, Inc.
 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and McAfee Research, the Security Research Division of McAfee, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program. [2001 McAfee, Inc.]
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_pcbgroup.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/refcount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/ucred.h>

#include <sys/md5.h>
#include <crypto/siphash/siphash.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCP_RFC7413
#include <netinet/tcp_fastopen.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/toecore.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /* IPSEC */

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static VNET_DEFINE(int, tcp_syncookies) = 1;
#define	V_tcp_syncookies		VNET(tcp_syncookies)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_syncookies), 0,
    "Use TCP SYN cookies if the syncache overflows");

static VNET_DEFINE(int, tcp_syncookiesonly) = 0;
#define	V_tcp_syncookiesonly		VNET(tcp_syncookiesonly)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_syncookiesonly), 0,
    "Use only TCP SYN cookies");

#ifdef TCP_OFFLOAD
#define	ADDED_BY_TOE(sc)	((sc)->sc_tod != NULL)
#endif

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
static int	 syncache_respond(struct syncache *, struct syncache_head *,
		    int, const struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static void	 syncache_timeout(struct syncache *sc,
		    struct syncache_head *sch, int docallout);
static void	 syncache_timer(void *);

static uint32_t	 syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
		    uint8_t *, uintptr_t);
static tcp_seq	 syncookie_generate(struct syncache_head *, struct syncache *);
static struct syncache
		*syncookie_lookup(struct in_conninfo *, struct syncache_head *,
		    struct syncache *, struct tcphdr *, struct tcpopt *,
		    struct socket *);
static void	 syncookie_reseed(void *);
#ifdef INVARIANTS
static int	 syncookie_cmp(struct in_conninfo *inc,
		    struct syncache_head *sch, struct syncache *sc,
		    struct tcphdr *th, struct tcpopt *to, struct socket *lso);
#endif

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits correspond to a timeout of 3 * (1 + 2 + 4 + 8) == 45 seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3
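
/*
 * Editor's illustration (derived from the arithmetic in the comment above,
 * assuming a 3 second base RTO that doubles on each attempt; not part of
 * the original sources):
 *
 *	t =  0s   initial SYN,ACK
 *	t =  3s   retransmit #1   (3 * 1)
 *	t =  9s   retransmit #2   (+ 3 * 2)
 *	t = 21s   retransmit #3   (+ 3 * 4)
 *	t = 45s   entry expires   (+ 3 * 8)
 */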

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

static VNET_DEFINE(struct tcp_syncache, tcp_syncache);
#define	V_tcp_syncache			VNET(tcp_syncache)

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
    "TCP SYN cache");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.bucket_limit), 0,
    "Per-bucket hash limit for syncache");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.cache_limit), 0,
    "Overall entry limit for syncache");

SYSCTL_UMA_CUR(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_VNET,
    &VNET_NAME(tcp_syncache.zone), "Current number of entries in syncache");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize,
    CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.hashsize), 0,
    "Size of TCP syncache hashtable");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit,
    CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_syncache.rexmt_limit), 0,
    "Limit on SYN/ACK retransmissions");

VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
    "Send reset on socket allocation failure");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define SCH_LOCK(sch)		mtx_lock(&(sch)->sch_mtx)
#define SCH_UNLOCK(sch)		mtx_unlock(&(sch)->sch_mtx)
#define SCH_LOCK_ASSERT(sch)	mtx_assert(&(sch)->sch_mtx, MA_OWNED)

/*
 * Requires the syncache entry to be already removed from the bucket list.
 */
static void
syncache_free(struct syncache *sc)
{

	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);
	if (sc->sc_cred)
		crfree(sc->sc_cred);
#ifdef MAC
	mac_syncache_destroy(&sc->sc_label);
#endif

	uma_zfree(V_tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	V_tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &V_tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &V_tcp_syncache.bucket_limit);
	if (!powerof2(V_tcp_syncache.hashsize) ||
	    V_tcp_syncache.hashsize == 0) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	}
	V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;

	/* Set limits. */
	V_tcp_syncache.cache_limit =
	    V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &V_tcp_syncache.cache_limit);

	/* Allocate the hash table. */
	V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
	    sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);

#ifdef VIMAGE
	V_tcp_syncache.vnet = curvnet;
#endif

	/* Initialize the hash buckets. */
	for (i = 0; i < V_tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
		mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
		    NULL, MTX_DEF);
		callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
		    &V_tcp_syncache.hashbase[i].sch_mtx, 0);
		V_tcp_syncache.hashbase[i].sch_length = 0;
		V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
	}

	/* Create the syncache entry zone. */
	V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
	    V_tcp_syncache.cache_limit);

	/* Start the SYN cookie reseeder callout. */
	callout_init(&V_tcp_syncache.secret.reseed, 1);
	arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
	arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
	callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
	    syncookie_reseed, &V_tcp_syncache);
}

#ifdef VIMAGE
void
syncache_destroy(void)
{
	struct syncache_head *sch;
	struct syncache *sc, *nsc;
	int i;

	/*
	 * Stop the re-seed timer before freeing resources.  No need to
	 * possibly schedule it another time.
	 */
	callout_drain(&V_tcp_syncache.secret.reseed);

	/* Cleanup hash buckets: stop timers, free entries, destroy locks. */
	for (i = 0; i < V_tcp_syncache.hashsize; i++) {

		sch = &V_tcp_syncache.hashbase[i];
		callout_drain(&sch->sch_timer);

		SCH_LOCK(sch);
		TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
			syncache_drop(sc, sch);
		SCH_UNLOCK(sch);
		KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
		    ("%s: sch->sch_bucket not empty", __func__));
		KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
		    __func__, sch->sch_length));
		mtx_destroy(&sch->sch_mtx);
	}

	KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
	    ("%s: cache_count not 0", __func__));

	/* Free the allocated global resources. */
	uma_zdestroy(V_tcp_syncache.zone);
	free(V_tcp_syncache.hashbase, M_SYNCACHE);
}
#endif

/*
 * Inserts a syncache entry into the specified bucket row.
 * Locks and unlocks the syncache_head autonomously.
 */
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct syncache *sc2;

	SCH_LOCK(sch);

	/*
	 * Make sure that we don't overflow the per-bucket limit.
	 * If the bucket is full, toss the oldest element.
	 */
	if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
		KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
		    ("sch->sch_length incorrect"));
		sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
		syncache_drop(sc2, sch);
		TCPSTAT_INC(tcps_sc_bucketoverflow);
	}

	/* Put it into the bucket. */
	TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;

#ifdef TCP_OFFLOAD
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_syncache_added(tod, sc->sc_todctx);
	}
#endif

	/* Reinitialize the bucket row's timer. */
	if (sch->sch_length == 1)
		sch->sch_nextc = ticks + INT_MAX;
	syncache_timeout(sc, sch, 1);

	SCH_UNLOCK(sch);

	TCPSTATES_INC(TCPS_SYN_RECEIVED);
	TCPSTAT_INC(tcps_sc_added);
}

/*
 * Remove and free entry from syncache bucket row.
 * Expects a locked syncache head.
 */
static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{

	SCH_LOCK_ASSERT(sch);

	TCPSTATES_DEC(TCPS_SYN_RECEIVED);
	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;

#ifdef TCP_OFFLOAD
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_syncache_removed(tod, sc->sc_todctx);
	}
#endif

	syncache_free(sc);
}

/*
 * Engage/reengage the timer on a bucket row.
 */
static void
syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
{
	sc->sc_rxttime = ticks +
	    TCPTV_RTOBASE * (tcp_syn_backoff[sc->sc_rxmits]);
	sc->sc_rxmits++;
	if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
		sch->sch_nextc = sc->sc_rxttime;
		if (docallout)
			callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
			    syncache_timer, (void *)sch);
	}
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 * One separate timer for each bucket row.
 */
static void
syncache_timer(void *xsch)
{
	struct syncache_head *sch = (struct syncache_head *)xsch;
	struct syncache *sc, *nsc;
	int tick = ticks;
	char *s;

	CURVNET_SET(sch->sch_sc->vnet);

	/* NB: syncache_head has already been locked by the callout. */
	SCH_LOCK_ASSERT(sch);

	/*
	 * In the following cycle we may remove some entries and/or
	 * advance some timeouts, so re-initialize the bucket timer.
	 */
	sch->sch_nextc = tick + INT_MAX;

	TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
		/*
		 * We do not check if the listen socket still exists
		 * and accept the case where the listen socket may be
		 * gone by the time we resend the SYN/ACK.  We do
		 * not expect this to happen often.  If it does,
		 * then the RST will be sent by the time the remote
		 * host does the SYN/ACK->ACK.
		 */
		if (TSTMP_GT(sc->sc_rxttime, tick)) {
			if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
				sch->sch_nextc = sc->sc_rxttime;
			continue;
		}
		if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL,
			    NULL))) {
				log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
				    "giving up and removing syncache entry\n",
				    s, __func__);
				free(s, M_TCPLOG);
			}
			syncache_drop(sc, sch);
			TCPSTAT_INC(tcps_sc_stale);
			continue;
		}
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Response timeout, "
			    "retransmitting (%u) SYN|ACK\n",
			    s, __func__, sc->sc_rxmits);
			free(s, M_TCPLOG);
		}

		syncache_respond(sc, sch, 1, NULL);
		TCPSTAT_INC(tcps_sc_retransmitted);
		syncache_timeout(sc, sch, 0);
	}
	if (!TAILQ_EMPTY(&(sch)->sch_bucket))
		callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
		    syncache_timer, (void *)(sch));
	CURVNET_RESTORE();
}

/*
 * Find an entry in the syncache.
 * Always returns with a locked syncache_head plus a matching entry or NULL.
 */
static struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct syncache *sc;
	struct syncache_head *sch;
	uint32_t hash;

	/*
	 * The hash is built on foreign port + local port + foreign address.
	 * We rely on the fact that struct in_conninfo starts with 16 bits
	 * of foreign port, then 16 bits of local port, followed by 128
	 * bits of foreign address.  In the case of an IPv4 address, the
	 * first 3 32-bit words of the address are always zeroes.
	 */
	hash = jenkins_hash32((uint32_t *)&inc->inc_ie, 5,
	    V_tcp_syncache.hash_secret) & V_tcp_syncache.hashmask;
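
	/*
	 * Editor's illustration (an assumption for clarity, not part of the
	 * original file): the 5 32-bit words hashed above can be pictured as
	 *
	 *	struct {
	 *		uint16_t fport;		/- foreign port
	 *		uint16_t lport;		/- local port
	 *		uint32_t faddr[4];	/- IPv6 address, or for IPv4
	 *					/- three zero words followed
	 *					/- by the 32-bit address
	 *	};
	 */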

	sch = &V_tcp_syncache.hashbase[hash];
	*schp = sch;
	SCH_LOCK(sch);

	/* Circle through bucket row to find matching entry. */
	TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
		if (bcmp(&inc->inc_ie, &sc->sc_inc.inc_ie,
		    sizeof(struct in_endpoints)) == 0)
			break;

	return (sc);	/* Always returns with locked sch. */
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;
	char *s = NULL;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);

	/*
	 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
	 * See RFC 793 page 65, section SEGMENT ARRIVES.
	 */
	if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
			    "FIN flag set, segment ignored\n", s, __func__);
		TCPSTAT_INC(tcps_badrst);
		goto done;
	}

	/*
	 * No corresponding connection was found in syncache.
	 * If syncookies are enabled and possibly exclusively
	 * used, or we are under memory pressure, a valid RST
	 * may not find a syncache entry.  In that case we're
	 * done and no SYN|ACK retransmissions will happen.
	 * Otherwise the RST was misdirected or spoofed.
	 */
	if (sc == NULL) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
			    "syncache entry (possibly syncookie only), "
			    "segment ignored\n", s, __func__);
		TCPSTAT_INC(tcps_badrst);
		goto done;
	}

	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
			    "connection attempt aborted by remote endpoint\n",
			    s, __func__);
		TCPSTAT_INC(tcps_sc_reset);
	} else {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
			    "IRS %u (+WND %u), segment ignored\n",
			    s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
		TCPSTAT_INC(tcps_badrst);
	}

done:
	if (s != NULL)
		free(s, M_TCPLOG);
	SCH_UNLOCK(sch);
}

void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		TCPSTAT_INC(tcps_sc_badack);
	}
	SCH_UNLOCK(sch);
}

void
syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL)
		goto done;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		goto done;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
		sc->sc_flags |= SCF_UNREACH;
		goto done;
	}
	syncache_drop(sc, sch);
	TCPSTAT_INC(tcps_sc_unreach);
done:
	SCH_UNLOCK(sch);
}

/*
 * Build a new TCP socket structure from a syncache entry.
 *
 * On success return the newly created socket with its underlying inp locked.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct tcp_function_block *blk;
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;
	int error;
	char *s;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, 0);
	if (so == NULL) {
		/*
		 * Drop the connection; we will either send a RST or
		 * have the peer retransmit its SYN again after its
		 * RTO and try again.
		 */
		TCPSTAT_INC(tcps_listendrop);
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Socket create failed "
			    "due to limits or memory shortage\n",
			    s, __func__);
			free(s, M_TCPLOG);
		}
		goto abort2;
	}
#ifdef MAC
	mac_socketpeer_set_from_mbuf(m, so);
#endif

	inp = sotoinpcb(so);
	inp->inp_inc.inc_fibnum = so->so_fibnum;
	INP_WLOCK(inp);
	/*
	 * Exclusive pcbinfo lock is not required in syncache socket case even
	 * if two inpcb locks can be acquired simultaneously:
	 * - the inpcb in LISTEN state,
	 * - the newly created inp.
	 *
	 * In this case, an inp cannot be at same time in LISTEN state and
	 * just created by an accept() call.
	 */
	INP_HASH_WLOCK(&V_tcbinfo);

	/* Insert new socket into PCB hash list. */
	inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif

	/*
	 * If there's an mbuf and it has a flowid, then let's initialise the
	 * inp with that particular flowid.
	 */
	if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}

	/*
	 * Install in the reservation hash table for now, but don't yet
	 * install a connection group since the full 4-tuple isn't yet
	 * configured.
	 */
	inp->inp_lport = sc->sc_inc.inc_lport;
	if ((error = in_pcbinshash_nopcbgroup(inp)) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_flags & INC_ISIPV6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
			    "with error %i\n",
			    s, __func__, error);
			free(s, M_TCPLOG);
		}
		INP_HASH_WUNLOCK(&V_tcbinfo);
		goto abort;
	}
#ifdef IPSEC
	/* Copy old policy into new socket's. */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_socket: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred, m)) != 0) {
			inp->in6p_laddr = laddr6;
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL,
			    NULL))) {
				log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
				    "with error %i\n",
				    s, __func__, error);
				free(s, M_TCPLOG);
			}
			INP_HASH_WUNLOCK(&V_tcbinfo);
			goto abort;
		}
		/* Override flowlabel from in6_pcbconnect. */
		inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
		inp->inp_flow |= sc->sc_flowlabel;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = (m) ? ip_srcroute(m) : NULL;

		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred, m)) != 0) {
			inp->inp_laddr = laddr;
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL,
			    NULL))) {
				log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
				    "with error %i\n",
				    s, __func__, error);
				free(s, M_TCPLOG);
			}
			INP_HASH_WUNLOCK(&V_tcbinfo);
			goto abort;
		}
	}
#endif /* INET */
	INP_HASH_WUNLOCK(&V_tcbinfo);
	tp = intotcpcb(inp);
	tcp_state_change(tp, TCPS_SYN_RECEIVED);
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	blk = sototcpcb(lso)->t_fb;
	if (blk != tp->t_fb) {
		/*
		 * Our parent's t_fb was not the default,
		 * we need to release our ref on tp->t_fb and
		 * pick up one on the new entry.
		 */
		struct tcp_function_block *rblk;

		rblk = find_and_ref_tcp_fb(blk);
		KASSERT(rblk != NULL,
		    ("cannot find blk %p out of syncache?", blk));
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp);
		refcount_release(&tp->t_fb->tfb_refcnt);
		tp->t_fb = rblk;
		if (tp->t_fb->tfb_tcp_fb_init) {
			(*tp->t_fb->tfb_tcp_fb_init)(tp);
		}
	}
	tp->snd_wl1 = sc->sc_irs;
	tp->snd_max = tp->iss + 1;
	tp->snd_nxt = tp->iss + 1;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	else {
		if (sc->sc_flags & SCF_WINSCALE) {
			tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
			tp->snd_scale = sc->sc_requested_s_scale;
			tp->request_r_scale = sc->sc_requested_r_scale;
		}
		if (sc->sc_flags & SCF_TIMESTAMP) {
			tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
			tp->ts_recent = sc->sc_tsreflect;
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_offset = sc->sc_tsoff;
		}
#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			tp->t_flags |= TF_SIGNATURE;
#endif
		if (sc->sc_flags & SCF_SACK)
			tp->t_flags |= TF_SACK_PERMIT;
	}

	if (sc->sc_flags & SCF_ECN)
		tp->t_flags |= TF_ECN_PERMIT;

	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, indicate that the CWND should
	 * be limited to one segment in cc_conn_init().
	 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
	 */
	if (sc->sc_rxmits > 1)
		tp->snd_cwnd = 1;

#ifdef TCP_OFFLOAD
	/*
	 * Allow a TOE driver to install its hooks.  Note that we hold the
	 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
	 * new connection before the TOE driver has done its thing.
	 */
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_offload_socket(tod, sc->sc_todctx, so);
	}
#endif
	/*
	 * Copy and activate timers.
	 */
	tp->t_keepinit = sototcpcb(lso)->t_keepinit;
	tp->t_keepidle = sototcpcb(lso)->t_keepidle;
	tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
	tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));

	soisconnected(so);

	TCPSTAT_INC(tcps_accepts);
	return (so);

abort:
	INP_WUNLOCK(inp);
abort2:
	if (so != NULL)
		soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 *
 * On syncache_socket() success the newly created socket
 * has its underlying inp locked.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct socket **lsop, struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct syncache scs;
	char *s;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
	    ("%s: can handle only ACK", __func__));

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);

#ifdef INVARIANTS
	/*
	 * Test code for syncookies comparing the syncache stored
	 * values with the reconstructed values from the cookie.
	 */
	if (sc != NULL)
		syncookie_cmp(inc, sch, sc, th, to, *lsop);
#endif

	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!V_tcp_syncookies) {
			SCH_UNLOCK(sch);
			if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Spurious ACK, "
				    "segment rejected (syncookies disabled)\n",
				    s, __func__);
			goto failed;
		}
		bzero(&scs, sizeof(scs));
		sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
		SCH_UNLOCK(sch);
		if (sc == NULL) {
			if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Segment failed "
				    "SYNCOOKIE authentication, segment rejected "
				    "(probably spoofed)\n", s, __func__);
			goto failed;
		}
	} else {
		/*
		 * Pull out the entry to unlock the bucket row.
		 *
		 * NOTE: We must decrease TCPS_SYN_RECEIVED count here, not
		 * tcp_state_change().  The tcpcb does not exist at this
		 * moment.  A new one will be allocated via syncache_socket->
		 * sonewconn->tcp_usr_attach in TCPS_CLOSED state, then
		 * syncache_socket() will change it to TCPS_SYN_RECEIVED.
		 */
		TCPSTATES_DEC(TCPS_SYN_RECEIVED);
		TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
		sch->sch_length--;
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			tod->tod_syncache_removed(tod, sc->sc_todctx);
		}
#endif
		SCH_UNLOCK(sch);
	}

	/*
	 * Segment validation:
	 * ACK must match our initial sequence number + 1 (the SYN|ACK).
	 */
	if (th->th_ack != sc->sc_iss + 1) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
			    "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
		goto failed;
	}

	/*
	 * The SEQ must fall in the window starting at the received
	 * initial receive sequence number + 1 (the SYN).
	 */
	if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
	    SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
			    "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
		goto failed;
	}

	/*
	 * If timestamps were not negotiated during SYN/ACK they
	 * must not appear on any segment during this session.
	 */
	if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "segment rejected\n", s, __func__);
		goto failed;
	}

	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session.
	 * XXXAO: This is only informal as there have been unverified
	 * reports of non-compliant stacks.
	 */
	if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
			s = NULL;
		}
	}

	/*
	 * If timestamps were negotiated the reflected timestamp
	 * must be equal to what we actually sent in the SYN|ACK.
	 */
	if ((to->to_flags & TOF_TS) && to->to_tsecr != sc->sc_ts) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
			    "segment rejected\n",
			    s, __func__, to->to_tsecr, sc->sc_ts);
		goto failed;
	}

	*lsop = syncache_socket(sc, *lsop, m);

	if (*lsop == NULL)
		TCPSTAT_INC(tcps_sc_aborted);
	else
		TCPSTAT_INC(tcps_sc_completed);

/* how do we find the inp for the new socket? */
	if (sc != &scs)
		syncache_free(sc);
	return (1);
failed:
	if (sc != NULL && sc != &scs)
		syncache_free(sc);
	if (s != NULL)
		free(s, M_TCPLOG);
	*lsop = NULL;
	return (0);
}

#ifdef TCP_RFC7413
static void
syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
    uint64_t response_cookie)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	unsigned int *pending_counter;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	pending_counter = intotcpcb(sotoinpcb(*lsop))->t_tfo_pending;
	*lsop = syncache_socket(sc, *lsop, m);
	if (*lsop == NULL) {
		TCPSTAT_INC(tcps_sc_aborted);
		atomic_subtract_int(pending_counter, 1);
	} else {
		inp = sotoinpcb(*lsop);
		tp = intotcpcb(inp);
		tp->t_flags |= TF_FASTOPEN;
		tp->t_tfo_cookie = response_cookie;
		tp->snd_max = tp->iss;
		tp->snd_nxt = tp->iss;
		tp->t_tfo_pending = pending_counter;
		TCPSTAT_INC(tcps_sc_completed);
	}
}
#endif /* TCP_RFC7413 */

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 *
 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
 * cookie is processed, V_tcp_fastopen_enabled set to true, and the
 * TCP_FASTOPEN socket option is set.  In this case, a new socket is created
 * and returned via lsop, the mbuf is not freed so that tcp_input() can
 * queue its data to the socket, and 1 is returned to indicate the
 * TFO-socket-creation path was taken.
 */
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
    void *todctx)
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	u_int ltflags;
	int win, sb_hiwat, ip_ttl, ip_tos;
	char *s;
	int rv = 0;
#ifdef INET6
	int autoflowlabel = 0;
#endif
#ifdef MAC
	struct label *maclabel;
#endif
	struct syncache scs;
	struct ucred *cred;
#ifdef TCP_RFC7413
	uint64_t tfo_response_cookie;
	int tfo_cookie_valid = 0;
	int tfo_response_cookie_valid = 0;
#endif

	INP_WLOCK_ASSERT(inp);			/* listen socket */
	KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
	    ("%s: unexpected tcp flags", __func__));

	/*
	 * Combine all so/tp operations very early to drop the INP lock as
	 * soon as possible.
	 */
	so = *lsop;
	tp = sototcpcb(so);
	cred = crhold(so->so_cred);

#ifdef INET6
	if ((inc->inc_flags & INC_ISIPV6) &&
	    (inp->inp_flags & IN6P_AUTOFLOWLABEL))
		autoflowlabel = 1;
#endif
	ip_ttl = inp->inp_ip_ttl;
	ip_tos = inp->inp_ip_tos;
	win = sbspace(&so->so_rcv);
	sb_hiwat = so->so_rcv.sb_hiwat;
	ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));

#ifdef TCP_RFC7413
	if (V_tcp_fastopen_enabled && (tp->t_flags & TF_FASTOPEN) &&
	    (tp->t_tfo_pending != NULL) && (to->to_flags & TOF_FASTOPEN)) {
		/*
		 * Limit the number of pending TFO connections to
		 * approximately half of the queue limit.  This prevents TFO
		 * SYN floods from starving the service by filling the
		 * listen queue with bogus TFO connections.
		 */
		if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
		    (so->so_qlimit / 2)) {
			int result;

			result = tcp_fastopen_check_cookie(inc,
			    to->to_tfo_cookie, to->to_tfo_len,
			    &tfo_response_cookie);
			tfo_cookie_valid = (result > 0);
			tfo_response_cookie_valid = (result >= 0);
		} else
			atomic_subtract_int(tp->t_tfo_pending, 1);
	}
#endif

	/* By the time we drop the lock these should no longer be used. */
	so = NULL;
	tp = NULL;

#ifdef MAC
	if (mac_syncache_init(&maclabel) != 0) {
		INP_WUNLOCK(inp);
		goto done;
	} else
		mac_syncache_create(maclabel, inp);
#endif
#ifdef TCP_RFC7413
	if (!tfo_cookie_valid)
#endif
		INP_WUNLOCK(inp);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!(inc->inc_flags & INC_ISIPV6))
#endif
#ifdef INET
		ipopts = (m) ? ip_srcroute(m) : NULL;
#else
		ipopts = NULL;
#endif

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX: should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 *
	 * XXX: We do not check the sequence number to see if this is a
	 * real retransmit or a new connection attempt.  The question is
	 * how to handle such a case; either ignore it as spoofed, or
	 * drop the current entry and create a new one?
	 */
	sc = syncache_lookup(inc, &sch);	/* returns locked entry */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
#ifdef TCP_RFC7413
		if (tfo_cookie_valid)
			INP_WUNLOCK(inp);
#endif
		TCPSTAT_INC(tcps_sc_dupsyn);
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
			sc->sc_tsreflect = to->to_tsval;
		else
			sc->sc_flags &= ~SCF_TIMESTAMP;
#ifdef MAC
		/*
		 * Since we have already unconditionally allocated label
		 * storage, free it up.  The syncache entry will already
		 * have an initialized label we can use.
		 */
		mac_syncache_destroy(&maclabel);
#endif
		/* Retransmit SYN|ACK and reset retransmit count. */
		if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
			    "resetting timer and retransmitting SYN|ACK\n",
			    s, __func__);
			free(s, M_TCPLOG);
		}
		if (syncache_respond(sc, sch, 1, m) == 0) {
			sc->sc_rxmits = 0;
			syncache_timeout(sc, sch, 1);
			TCPSTAT_INC(tcps_sndacks);
			TCPSTAT_INC(tcps_sndtotal);
		}
		SCH_UNLOCK(sch);
		goto done;
	}

#ifdef TCP_RFC7413
	if (tfo_cookie_valid) {
		bzero(&scs, sizeof(scs));
		sc = &scs;
		goto skip_alloc;
	}
#endif

	sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		TCPSTAT_INC(tcps_sc_zonefail);
		if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL)
			syncache_drop(sc, sch);
		sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
		if (sc == NULL) {
			if (V_tcp_syncookies) {
				bzero(&scs, sizeof(scs));
				sc = &scs;
			} else {
				SCH_UNLOCK(sch);
				if (ipopts)
					(void) m_free(ipopts);
				goto done;
			}
		}
	}

#ifdef TCP_RFC7413
skip_alloc:
	if (!tfo_cookie_valid && tfo_response_cookie_valid)
		sc->sc_tfo_cookie = &tfo_response_cookie;
#endif

	/*
	 * Fill in the syncache values.
	 */
#ifdef MAC
	sc->sc_label = maclabel;
#endif
	sc->sc_cred = cred;
	cred = NULL;
	sc->sc_ipopts = ipopts;
	bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
#ifdef INET6
	if (!(inc->inc_flags & INC_ISIPV6))
#endif
	{
		sc->sc_ip_tos = ip_tos;
		sc->sc_ip_ttl = ip_ttl;
	}
#ifdef TCP_OFFLOAD
	sc->sc_tod = tod;
	sc->sc_todctx = todctx;
#endif
	sc->sc_irs = th->th_seq;
	sc->sc_iss = arc4random();
	sc->sc_flags = 0;
	sc->sc_flowlabel = 0;

	/*
	 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
	 * win was derived from socket earlier in the function.
	 */
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (V_tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsreflect = to->to_tsval;
			sc->sc_ts = tcp_ts_getticks();
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/*
			 * Pick the smallest possible scaling factor that
			 * will still allow us to scale up to sb_max, aka
			 * kern.ipc.maxsockbuf.
			 *
			 * We do this because there are broken firewalls that
			 * will corrupt the window scale option, leading to
			 * the other endpoint believing that our advertised
			 * window is unscaled.  At scale factors larger than
			 * 5 the unscaled window will drop below 1500 bytes,
			 * leading to serious problems when traversing these
			 * broken firewalls.
			 *
			 * With the default maxsockbuf of 256K, a scale factor
			 * of 3 will be chosen by this algorithm.  Those who
			 * choose a larger maxsockbuf should watch out
			 * for the compatibility problems mentioned above.
			 *
			 * RFC1323: The Window field in a SYN (i.e., a <SYN>
			 * or <SYN,ACK>) segment itself is never scaled.
			 */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < sb_max)
				wscale++;
			sc->sc_requested_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_wscale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
#ifdef TCP_SIGNATURE
	/*
	 * If the listening socket requested TCP digests, OR the received
	 * SYN contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 */
	if (to->to_flags & TOF_SIGNATURE || ltflags & TF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif
	if (to->to_flags & TOF_SACKPERM)
		sc->sc_flags |= SCF_SACK;
	if (to->to_flags & TOF_MSS)
		sc->sc_peer_mss = to->to_mss;	/* peer mss may be zero */
	if (ltflags & TF_NOOPT)
		sc->sc_flags |= SCF_NOOPT;
	if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
		sc->sc_flags |= SCF_ECN;

	if (V_tcp_syncookies)
		sc->sc_iss = syncookie_generate(sch, sc);
#ifdef INET6
	if (autoflowlabel) {
		if (V_tcp_syncookies)
			sc->sc_flowlabel = sc->sc_iss;
		else
			sc->sc_flowlabel = ip6_randomflowlabel();
		sc->sc_flowlabel = htonl(sc->sc_flowlabel) &
		    IPV6_FLOWLABEL_MASK;
	}
#endif
	SCH_UNLOCK(sch);

#ifdef TCP_RFC7413
	if (tfo_cookie_valid) {
		syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
		/* INP_WUNLOCK(inp) will be performed by the callee */
		rv = 1;
		goto tfo_done;
	}
#endif

	/*
	 * Do a standard 3-way handshake.
	 */
	if (syncache_respond(sc, sch, 0, m) == 0) {
		if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
			syncache_free(sc);
		else if (sc != &scs)
			syncache_insert(sc, sch);   /* locks and unlocks sch */
		TCPSTAT_INC(tcps_sndacks);
		TCPSTAT_INC(tcps_sndtotal);
	} else {
		if (sc != &scs)
			syncache_free(sc);
		TCPSTAT_INC(tcps_sc_dropped);
	}

done:
	if (m) {
		*lsop = NULL;
		m_freem(m);
	}
#ifdef TCP_RFC7413
tfo_done:
#endif
	if (cred != NULL)
		crfree(cred);
#ifdef MAC
	if (sc == &scs)
		mac_syncache_destroy(&maclabel);
#endif
	return (rv);
}

static int
syncache_respond(struct syncache *sc, struct syncache_head *sch, int locked,
    const struct mbuf *m0)
{
	struct ip *ip = NULL;
	struct mbuf *m;
	struct tcphdr *th = NULL;
	int optlen, error = 0;	/* Make compiler happy */
	u_int16_t hlen, tlen, mssopt;
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif
#ifdef TCP_SIGNATURE
	struct secasvar *sav;
#endif

	hlen =
#ifdef INET6
	       (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
#endif
		sizeof(struct ip);
	tlen = hlen + sizeof(struct tcphdr);

	/* Determine MSS we advertise to the other end of the connection. */
	mssopt = tcp_mssopt(&sc->sc_inc);
	if (sc->sc_peer_mss)
		mssopt = max(min(sc->sc_peer_mss, mssopt), V_tcp_minmss);

	/* XXX: Assume that the entire packet will fit in a header mbuf. */
	KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
	    ("syncache: mbuf too small"));

	/* Create the IP+TCP header from scratch. */
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
#ifdef MAC
	mac_syncache_create_mbuf(sc->sc_label, m);
#endif
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
		ip6->ip6_flow |= sc->sc_flowlabel;

		th = (struct tcphdr *)(ip6 + 1);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = htons(tlen);
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_ip_ttl;
		ip->ip_tos = sc->sc_ip_tos;

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (V_path_mtu_discovery &&
		    ((sc->sc_flags & SCF_UNREACH) == 0))
			ip->ip_off |= htons(IP_DF);

		th = (struct tcphdr *)(ip + 1);
	}
#endif /* INET */
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = sizeof(struct tcphdr) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	if (sc->sc_flags & SCF_ECN) {
		th->th_flags |= TH_ECE;
		TCPSTAT_INC(tcps_ecn_shs);
	}

	/* Tack on the TCP options. */
	if ((sc->sc_flags & SCF_NOOPT) == 0) {
		to.to_flags = 0;

		to.to_mss = mssopt;
		to.to_flags = TOF_MSS;
		if (sc->sc_flags & SCF_WINSCALE) {
			to.to_wscale = sc->sc_requested_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		if (sc->sc_flags & SCF_TIMESTAMP) {
			/* Virgin timestamp or TCP cookie enhanced one. */
			to.to_tsval = sc->sc_ts;
			to.to_tsecr = sc->sc_tsreflect;
			to.to_flags |= TOF_TS;
		}
		if (sc->sc_flags & SCF_SACK)
			to.to_flags |= TOF_SACKPERM;
#ifdef TCP_SIGNATURE
		sav = NULL;
		if (sc->sc_flags & SCF_SIGNATURE) {
			sav = tcp_get_sav(m, IPSEC_DIR_OUTBOUND);
			if (sav != NULL)
				to.to_flags |= TOF_SIGNATURE;
			else {

				/*
				 * We've got the SCF_SIGNATURE flag
				 * inherited from the listening socket,
				 * but no SADB key for the given source
				 * address.  Assume a signature is not
				 * required and remove the signature flag
				 * instead of silently dropping the
				 * connection.
				 */
				if (locked == 0)
					SCH_LOCK(sch);
				sc->sc_flags &= ~SCF_SIGNATURE;
				if (locked == 0)
					SCH_UNLOCK(sch);
			}
		}
#endif

#ifdef TCP_RFC7413
		if (sc->sc_tfo_cookie) {
			to.to_flags |= TOF_FASTOPEN;
			to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
			to.to_tfo_cookie = sc->sc_tfo_cookie;
			/* Don't send cookie again when retransmitting response. */
			sc->sc_tfo_cookie = NULL;
		}
#endif
		optlen = tcp_addoptions(&to, (u_char *)(th + 1));
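
		/*
		 * Editor's worked example (illustrative, not part of the
		 * original sources): the base TCP header is 20 bytes and
		 * tcp_addoptions() pads the options to a multiple of 4,
		 * so with e.g. 16 bytes of options the adjustment below
		 * yields th_off = (20 + 16) >> 2 = 9 32-bit words.
		 */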

		/* Adjust headers by option size. */
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
		m->m_len += optlen;
		m->m_pkthdr.len += optlen;

#ifdef TCP_SIGNATURE
		if (sc->sc_flags & SCF_SIGNATURE)
			tcp_signature_do_compute(m, 0, optlen,
			    to.to_signature, sav);
#endif
#ifdef INET6
		if (sc->sc_inc.inc_flags & INC_ISIPV6)
			ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
		else
#endif
			ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
	} else
		optlen = 0;

	M_SETFIB(m, sc->sc_inc.inc_fibnum);
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	if (m0 != NULL && M_HASHTYPE_GET(m0) != M_HASHTYPE_NONE) {
		m->m_pkthdr.flowid = m0->m_pkthdr.flowid;
		M_HASHTYPE_SET(m, M_HASHTYPE_GET(m0));
	}
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
		    IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);

			return (error);
		}
#endif
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen + optlen - hlen + IPPROTO_TCP));
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);

			return (error);
		}
#endif
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
	}
#endif
	return (error);
}

/*
 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
 * that exceed the capacity of the syncache by avoiding the storage of any
 * of the SYNs we receive.  Syncookies defend against blind SYN flooding
 * attacks where the attacker does not have access to our responses.
 *
 * Syncookies encode and include all necessary information about the
 * connection setup within the SYN|ACK that we send back.  That way we
 * can avoid keeping any local state until the ACK to our SYN|ACK returns
 * (if ever).  Normally the syncache and syncookies are running in parallel
 * with the latter taking over when the former is exhausted.  When a matching
 * syncache entry is found the syncookie is ignored.
 *
 * The only reliable information persisting through the 3WHS is our initial
 * sequence number ISS of 32 bits.  Syncookies embed a cryptographically
 * sufficiently strong hash (MAC) value and a few bits of TCP SYN options in
 * the ISS of our SYN|ACK.  The MAC can be recomputed when the ACK to our
 * SYN|ACK returns and signifies a legitimate connection if it matches the ACK.
 *
 * The available space of 32 bits to store the hash and to encode the SYN
 * option information is very tight and we should have at least 24 bits for
 * the MAC to keep the number of guesses by blind spoofing reasonably high.
 *
 * SYN option information we have to encode to fully restore a connection:
 * MSS: is important to choose an optimal segment size to avoid IP level
 * fragmentation along the path.  The common MSS values can be encoded
 * in a 3-bit table.  Uncommon values are captured by the next lower value
 * in the table, leading to a slight increase in packetization overhead.
 * WSCALE: is necessary to allow large windows to be used for high delay-
 * bandwidth product links.  Not scaling the window when it was initially
 * negotiated is bad for performance as lack of scaling further decreases
 * the apparent available send window.  We only need to encode the WSCALE
 * we received from the remote end.  Our end can be recalculated at any
 * time.  The common WSCALE values can be encoded in a 3-bit table.
 * Uncommon values are captured by the next lower value in the table,
 * making us under-estimate the available window size and halving our
 * theoretically possible maximum throughput for that connection.
 * SACK: Greatly assists in packet loss recovery and requires 1 bit.
 * TIMESTAMP and SIGNATURE are not encoded because they are permanent options
 * that are included in all segments on a connection.  We enable them when
 * the ACK has them.
 *
 * Security of syncookies and attack vectors:
 *
 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod)
 * together with the global secret to make it unique per connection attempt.
 * Thus any change of any of those parameters results in a different MAC output
 * in an unpredictable way unless a collision is encountered.  24 bits of the
 * MAC are embedded into the ISS.
 *
 * To prevent replay attacks two rotating global secrets are updated with a
 * new random value every 15 seconds.  The life-time of a syncookie is thus
 * 15-30 seconds.
 *
 * Vector 1: Attacking the secret.  This requires finding a weakness in the
 * MAC itself or the way it is used here.  The attacker can do a chosen plain
 * text attack by varying and testing all the parameters under his control.
 * The strength depends on the size and randomness of the secret, and the
 * cryptographic security of the MAC function.  Due to the constant updating
 * of the secret the attacker has at most 29.999 seconds to find the secret
 * and launch spoofed connections.  After that he has to start all over again.
 *
 * Vector 2: Collision attack on the MAC of a single ACK.  With a 24 bit MAC
 * size an average of 4,823 attempts are required for a 50% chance of success
 * to spoof a single syncookie (birthday collision paradox).  However the
 * attacker is blind and doesn't know if one of his attempts succeeded unless
 * he has a side channel to infer success from.  A single connection setup
 * success average of 90% requires 8,790 packets, 99.99% requires 17,578
 * packets.  This many attempts are required for each one blind spoofed
 * connection.  For every additional spoofed connection he has to launch
 * another N attempts.  Thus for a sustained rate of 100 spoofed connections
 * per second approximately 1,800,000 packets per second would have to be sent.
 *
 * NB: The MAC function should be fast so that it doesn't become a CPU
 * exhaustion attack vector itself.
 *
 * References:
 *  RFC4987 TCP SYN Flooding Attacks and Common Mitigations
 *  SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996
 *   http://cr.yp.to/syncookies.html    (overview)
 *   http://cr.yp.to/syncookies/archive (details)
 *
 *
 * Schematic construction of a syncookie enabled Initial Sequence Number:
 *  0        1         2         3
 *  12345678901234567890123456789012
 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP|
 *
 *  x 24 MAC (truncated)
 *  W  3 Send Window Scale index
 *  M  3 MSS index
 *  S  1 SACK permitted
 *  P  1 Odd/even secret
 */
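
/*
 * Editor's sketch (an assumption for illustration, not part of the original
 * sources): the encode/decode round trip implied by the schematic above,
 * with the 8 option/flag bits XOR-masked by the top byte of the MAC so they
 * do not appear in the clear on the wire.  The real flag layout lives in
 * union syncookie in netinet/tcp_syncache.h.
 */
#if 0	/* illustration only, not compiled */
static uint32_t
example_cookie_encode(uint32_t mac, uint8_t flags)
{

	/* Top 24 bits: truncated MAC; low 8 bits: masked flag bits. */
	return ((mac & ~0xff) | (flags ^ (mac >> 24)));
}

static uint8_t
example_cookie_decode(uint32_t iss)
{

	/* XOR with the top byte again to unmask the flag bits. */
	return ((iss & 0xff) ^ (iss >> 24));
}
#endif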

/*
 * Distribution and probability of certain WSCALE values.  We have to map the
 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3
 * bits based on prevalence of certain values.  Values we don't have an exact
 * match for are rounded down to the next lower one, letting us under-estimate
 * the true available window.  At the moment this would happen only for the
 * very uncommon values 3, 5 and those above 8 (more than 16MB socket buffer
 * and window size).  The absence of the WSCALE option (no scaling in either
 * direction) is encoded with index zero.
 * [WSCALE values histograms, Allman, 2012]
 *                            X 10 10 35  5  6 14 10%   by host
 *                            X 11  4  5  5 18 49  3%   by connections
 */
static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 };

/*
 * Compute the MAC for the SYN cookie.  SIPHASH-2-4 is chosen for its speed
 * and good cryptographic properties.
 */
static uint32_t
syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags,
    uint8_t *secbits, uintptr_t secmod)
{
	SIPHASH_CTX ctx;
	uint32_t siphash[2];

	SipHash24_Init(&ctx);
	SipHash_SetKey(&ctx, secbits);
	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr));
		SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr));
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr));
		SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr));
		break;
#endif
	}
	SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport));
	SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport));
	SipHash_Update(&ctx, &irs, sizeof(irs));
	SipHash_Update(&ctx, &flags, sizeof(flags));
	SipHash_Update(&ctx, &secmod, sizeof(secmod));
	SipHash_Final((u_int8_t *)&siphash, &ctx);

	return (siphash[0] ^ siphash[1]);
}

static tcp_seq
syncookie_generate(struct syncache_head *sch, struct syncache *sc)
{
	u_int i, mss, secbit, wscale;
	uint32_t iss, hash;
	uint8_t *secbits;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	cookie.cookie = 0;

	/* Map our computed MSS into the 3-bit index. */
	mss = min(tcp_mssopt(&sc->sc_inc), max(sc->sc_peer_mss, V_tcp_minmss));
	for (i = nitems(tcp_sc_msstab) - 1; tcp_sc_msstab[i] > mss && i > 0;
	    i--)
		;
	cookie.flags.mss_idx = i;

	/*
	 * Map the send window scale into the 3-bit index but only if
	 * the wscale option was received.
	 */
	if (sc->sc_flags & SCF_WINSCALE) {
		wscale = sc->sc_requested_s_scale;
		for (i = nitems(tcp_sc_wstab) - 1;
		    tcp_sc_wstab[i] > wscale && i > 0;
		    i--)
			;
		cookie.flags.wscale_idx = i;
	}

	/* Can we do SACK? */
	if (sc->sc_flags & SCF_SACK)
		cookie.flags.sack_ok = 1;

	/* Which of the two secrets to use. */
	secbit = sch->sch_sc->secret.oddeven & 0x1;
	cookie.flags.odd_even = secbit;

	secbits = sch->sch_sc->secret.key[secbit];
	hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits,
	    (uintptr_t)sch);

	/*
	 * Put the flags into the hash and XOR them to get better ISS number
	 * variance.  This doesn't enhance the cryptographic strength and is
	 * done to prevent the 8 cookie bits from showing up directly on the
	 * wire.
	 */
	iss = hash & ~0xff;
	iss |= cookie.cookie ^ (hash >> 24);

	/* Randomize the timestamp. */
	if (sc->sc_flags & SCF_TIMESTAMP) {
		sc->sc_ts = arc4random();
		sc->sc_tsoff = sc->sc_ts - tcp_ts_getticks();
	}

	TCPSTAT_INC(tcps_sc_sendcookie);
	return (iss);
}
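
/*
 * Worked example with made-up numbers (illustrative only): a received WSCALE
 * of 5 has no exact entry in tcp_sc_wstab[] and is rounded down to index 4
 * (shift 4), halving the window we will remember for the peer.  If
 * syncookie_mac() returned hash = 0xd3a417c2 while the packed flag byte in
 * cookie.cookie happened to be 0x2d, the ISS sent in our SYN|ACK would be
 *
 *	iss = (0xd3a417c2 & ~0xff) | (0x2d ^ 0xd3) = 0xd3a417fe
 *
 * syncookie_lookup() below recovers the flag byte from the returning ACK as
 * (0xfe ^ 0xd3) = 0x2d and accepts the cookie only if the upper 24 bits of a
 * recomputed MAC still equal 0xd3a41700.
 */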

static struct syncache *
syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	uint32_t hash;
	uint8_t *secbits;
	tcp_seq ack, seq;
	int wnd, wscale = 0;
	union syncookie cookie;

	SCH_LOCK_ASSERT(sch);

	/*
	 * Pull information out of SYN-ACK/ACK and revert sequence number
	 * advances.
	 */
	ack = th->th_ack - 1;
	seq = th->th_seq - 1;

	/*
	 * Unpack the flags containing enough information to restore the
	 * connection.
	 */
	cookie.cookie = (ack & 0xff) ^ (ack >> 24);

	/* Which of the two secrets to use. */
	secbits = sch->sch_sc->secret.key[cookie.flags.odd_even];

	hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch);

	/* The recomputed hash matches the ACK if this was a genuine cookie. */
	if ((ack & ~0xff) != (hash & ~0xff))
		return (NULL);

	/* Fill in the syncache values. */
	sc->sc_flags = 0;
	bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
	sc->sc_ipopts = NULL;

	sc->sc_irs = seq;
	sc->sc_iss = ack;

	switch (inc->inc_flags & INC_ISIPV6) {
#ifdef INET
	case 0:
		sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl;
		sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos;
		break;
#endif
#ifdef INET6
	case INC_ISIPV6:
		if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = sc->sc_iss & IPV6_FLOWLABEL_MASK;
		break;
#endif
	}

	sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];

	/* We can simply recompute the receive window scale we sent earlier. */
	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
		wscale++;

	/* Only use wscale if it was enabled in the original SYN. */
	if (cookie.flags.wscale_idx > 0) {
		sc->sc_requested_r_scale = wscale;
		sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
		sc->sc_flags |= SCF_WINSCALE;
	}

	wnd = sbspace(&lso->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;

	if (cookie.flags.sack_ok)
		sc->sc_flags |= SCF_SACK;

	if (to->to_flags & TOF_TS) {
		sc->sc_flags |= SCF_TIMESTAMP;
		sc->sc_tsreflect = to->to_tsval;
		sc->sc_ts = to->to_tsecr;
		sc->sc_tsoff = to->to_tsecr - tcp_ts_getticks();
	}

	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;

	sc->sc_rxmits = 0;

	TCPSTAT_INC(tcps_sc_recvcookie);
	return (sc);
}

#ifdef INVARIANTS
static int
syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
    struct socket *lso)
{
	struct syncache scs, *scx;
	char *s;

	bzero(&scs, sizeof(scs));
	scx = syncookie_lookup(inc, sch, &scs, th, to, lso);

	if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL)
		return (0);

	if (scx != NULL) {
		if (sc->sc_peer_mss != scx->sc_peer_mss)
			log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n",
			    s, __func__, sc->sc_peer_mss, scx->sc_peer_mss);

		if (sc->sc_requested_r_scale != scx->sc_requested_r_scale)
			log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_r_scale,
			    scx->sc_requested_r_scale);

		if (sc->sc_requested_s_scale != scx->sc_requested_s_scale)
			log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n",
			    s, __func__, sc->sc_requested_s_scale,
			    scx->sc_requested_s_scale);

		if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK))
			log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__);
	}

	if (s != NULL)
		free(s, M_TCPLOG);
	return (0);
}
#endif /* INVARIANTS */

static void
syncookie_reseed(void *arg)
{
	struct tcp_syncache *sc = arg;
	uint8_t *secbits;
	int secbit;

	/*
	 * Reseeding the secret doesn't have to be protected by a lock.
	 * It only must be ensured that the new random values are visible
	 * to all CPUs in an SMP environment.  The atomic with release
	 * semantics ensures that.
	 */
	secbit = (sc->secret.oddeven & 0x1) ? 0 : 1;
	secbits = sc->secret.key[secbit];
	arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0);
	atomic_add_rel_int(&sc->secret.oddeven, 1);

	/* Reschedule ourself. */
	callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz);
}

/*
 * Exports the syncache entries to userland so that netstat can display
 * them alongside the other sockets.  This function is intended to be
 * called only from tcp_pcblist.
 *
 * Due to concurrency on an active system, the number of pcbs exported
 * may have no relation to max_pcbs.  max_pcbs merely indicates the
 * amount of space the caller allocated for this function to use.
 */
int
syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
{
	struct xtcpcb xt;
	struct syncache *sc;
	struct syncache_head *sch;
	int count, error, i;

	for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
		sch = &V_tcp_syncache.hashbase[i];
		SCH_LOCK(sch);
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (count >= max_pcbs) {
				SCH_UNLOCK(sch);
				goto exit;
			}
			if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
				continue;
			bzero(&xt, sizeof(xt));
			xt.xt_len = sizeof(xt);
			if (sc->sc_inc.inc_flags & INC_ISIPV6)
				xt.xt_inp.inp_vflag = INP_IPV6;
			else
				xt.xt_inp.inp_vflag = INP_IPV4;
			bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
			    sizeof (struct in_conninfo));
			xt.xt_tp.t_inpcb = &xt.xt_inp;
			xt.xt_tp.t_state = TCPS_SYN_RECEIVED;
			xt.xt_socket.xso_protocol = IPPROTO_TCP;
			xt.xt_socket.xso_len = sizeof (struct xsocket);
			xt.xt_socket.so_type = SOCK_STREAM;
			xt.xt_socket.so_state = SS_ISCONNECTING;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
			if (error) {
				SCH_UNLOCK(sch);
				goto exit;
			}
			count++;
		}
		SCH_UNLOCK(sch);
	}
exit:
	*pcbs_exported = count;
	return (error);
}
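
/*
 * Userland consumption sketch (illustrative only, not part of this file):
 * the entries exported above ride inside the TCP pcblist sysctl stream
 * produced by tcp_pcblist() (net.inet.tcp.pcblist), which netstat already
 * parses.  Assuming the usual struct xinpgen framing used by that sysctl,
 * a minimal reader might fetch the buffer and treat records whose
 * xt_tp.t_state is TCPS_SYN_RECEIVED as embryonic connections, which
 * includes the syncache entries emitted here:
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.tcp.pcblist", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("net.inet.tcp.pcblist", buf, &len, NULL, 0);
 *	(walk the xinpgen-framed records as netstat's inet.c does and
 *	 inspect ((struct xtcpcb *)rec)->xt_tp.t_state)
 */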