/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 McAfee, Inc.
 * Copyright (c) 2006,2013 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and McAfee Research, the Security Research Division of McAfee, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program. [2001 McAfee, Inc.]
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_pcbgroup.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/refcount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/ucred.h>

#include <sys/md5.h>
#include <crypto/siphash/siphash.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCP_RFC7413
#include <netinet/tcp_fastopen.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/toecore.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static VNET_DEFINE(int, tcp_syncookies) = 1;
#define	V_tcp_syncookies		VNET(tcp_syncookies)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_syncookies), 0,
    "Use TCP SYN cookies if the syncache overflows");

static VNET_DEFINE(int, tcp_syncookiesonly) = 0;
#define	V_tcp_syncookiesonly		VNET(tcp_syncookiesonly)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_syncookiesonly), 0,
    "Use only TCP SYN cookies");

static VNET_DEFINE(int, functions_inherit_listen_socket_stack) = 1;
#define	V_functions_inherit_listen_socket_stack \
    VNET(functions_inherit_listen_socket_stack)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, functions_inherit_listen_socket_stack,
    CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(functions_inherit_listen_socket_stack), 0,
    "Inherit listen socket's stack");

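/*
 * The three knobs above map to the sysctl(8) paths net.inet.tcp.syncookies,
 * net.inet.tcp.syncookies_only and
 * net.inet.tcp.functions_inherit_listen_socket_stack.  All are per-VNET and
 * writable at runtime, e.g. "sysctl net.inet.tcp.syncookies=1".
 */
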
#ifdef TCP_OFFLOAD
#define ADDED_BY_TOE(sc) ((sc)->sc_tod != NULL)
#endif

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
static int	 syncache_respond(struct syncache *, struct syncache_head *, int,
		    const struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static void	 syncache_timeout(struct syncache *sc, struct syncache_head *sch,
		    int docallout);
static void	 syncache_timer(void *);

static uint32_t	 syncookie_mac(struct in_conninfo *, tcp_seq, uint8_t,
		    uint8_t *, uintptr_t);
static tcp_seq	 syncookie_generate(struct syncache_head *, struct syncache *);
static struct syncache
		*syncookie_lookup(struct in_conninfo *, struct syncache_head *,
		    struct syncache *, struct tcphdr *, struct tcpopt *,
		    struct socket *);
static void	 syncookie_reseed(void *);
#ifdef INVARIANTS
static int	 syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch,
		    struct syncache *sc, struct tcphdr *th, struct tcpopt *to,
		    struct socket *lso);
#endif

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits corresponds to a timeout of 3 * (1 + 2 + 4 + 8) == 45 seconds;
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

static VNET_DEFINE(struct tcp_syncache, tcp_syncache);
#define	V_tcp_syncache			VNET(tcp_syncache)

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0,
    "TCP SYN cache");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.bucket_limit), 0,
    "Per-bucket hash limit for syncache");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.cache_limit), 0,
    "Overall entry limit for syncache");

SYSCTL_UMA_CUR(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_VNET,
    &VNET_NAME(tcp_syncache.zone), "Current number of entries in syncache");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_syncache.hashsize), 0,
    "Size of TCP syncache hashtable");

SYSCTL_UINT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_syncache.rexmt_limit), 0,
    "Limit on SYN/ACK retransmissions");

VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
    "Send reset on socket allocation failure");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

#define	SCH_LOCK(sch)		mtx_lock(&(sch)->sch_mtx)
#define	SCH_UNLOCK(sch)		mtx_unlock(&(sch)->sch_mtx)
#define	SCH_LOCK_ASSERT(sch)	mtx_assert(&(sch)->sch_mtx, MA_OWNED)

/*
 * Requires the syncache entry to be already removed from the bucket list.
 */
static void
syncache_free(struct syncache *sc)
{

	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);
	if (sc->sc_cred)
		crfree(sc->sc_cred);
#ifdef MAC
	mac_syncache_destroy(&sc->sc_label);
#endif

	uma_zfree(V_tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	V_tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &V_tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &V_tcp_syncache.bucket_limit);
	if (!powerof2(V_tcp_syncache.hashsize) ||
	    V_tcp_syncache.hashsize == 0) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	}
	V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;

	/* Set limits. */
	V_tcp_syncache.cache_limit =
	    V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &V_tcp_syncache.cache_limit);

	/* Allocate the hash table. */
	V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
	    sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);

#ifdef VIMAGE
	V_tcp_syncache.vnet = curvnet;
#endif

	/* Initialize the hash buckets. */
	for (i = 0; i < V_tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
		mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
		    NULL, MTX_DEF);
		callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
		    &V_tcp_syncache.hashbase[i].sch_mtx, 0);
		V_tcp_syncache.hashbase[i].sch_length = 0;
		V_tcp_syncache.hashbase[i].sch_sc = &V_tcp_syncache;
		V_tcp_syncache.hashbase[i].sch_last_overflow =
		    -(SYNCOOKIE_LIFETIME + 1);
	}

	/* Create the syncache entry zone. */
	V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_tcp_syncache.cache_limit = uma_zone_set_max(V_tcp_syncache.zone,
	    V_tcp_syncache.cache_limit);

	/* Start the SYN cookie reseeder callout. */
	callout_init(&V_tcp_syncache.secret.reseed, 1);
	arc4rand(V_tcp_syncache.secret.key[0], SYNCOOKIE_SECRET_SIZE, 0);
	arc4rand(V_tcp_syncache.secret.key[1], SYNCOOKIE_SECRET_SIZE, 0);
	callout_reset(&V_tcp_syncache.secret.reseed, SYNCOOKIE_LIFETIME * hz,
	    syncookie_reseed, &V_tcp_syncache);
}

#ifdef VIMAGE
void
syncache_destroy(void)
{
	struct syncache_head *sch;
	struct syncache *sc, *nsc;
	int i;

	/*
	 * Stop the re-seed timer before freeing resources.  No need to
	 * possibly schedule it another time.
	 */
	callout_drain(&V_tcp_syncache.secret.reseed);

	/* Cleanup hash buckets: stop timers, free entries, destroy locks. */
	for (i = 0; i < V_tcp_syncache.hashsize; i++) {

		sch = &V_tcp_syncache.hashbase[i];
		callout_drain(&sch->sch_timer);

		SCH_LOCK(sch);
		TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
			syncache_drop(sc, sch);
		SCH_UNLOCK(sch);
		KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
		    ("%s: sch->sch_bucket not empty", __func__));
		KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
		    __func__, sch->sch_length));
		mtx_destroy(&sch->sch_mtx);
	}

	KASSERT(uma_zone_get_cur(V_tcp_syncache.zone) == 0,
	    ("%s: cache_count not 0", __func__));

	/* Free the allocated global resources. */
	uma_zdestroy(V_tcp_syncache.zone);
	free(V_tcp_syncache.hashbase, M_SYNCACHE);
}
#endif

/*
 * Inserts a syncache entry into the specified bucket row.
 * Locks and unlocks the syncache_head autonomously.
 */
static void
syncache_insert(struct syncache *sc, struct syncache_head *sch)
{
	struct syncache *sc2;

	SCH_LOCK(sch);

	/*
	 * Make sure that we don't overflow the per-bucket limit.
	 * If the bucket is full, toss the oldest element.
	 */
	if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
		KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
		    ("sch->sch_length incorrect"));
		sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
		sch->sch_last_overflow = time_uptime;
		syncache_drop(sc2, sch);
		TCPSTAT_INC(tcps_sc_bucketoverflow);
	}

	/* Put it into the bucket. */
	TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;

#ifdef TCP_OFFLOAD
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_syncache_added(tod, sc->sc_todctx);
	}
#endif

	/* Reinitialize the bucket row's timer. */
	if (sch->sch_length == 1)
		sch->sch_nextc = ticks + INT_MAX;
	syncache_timeout(sc, sch, 1);

	SCH_UNLOCK(sch);

	TCPSTATES_INC(TCPS_SYN_RECEIVED);
	TCPSTAT_INC(tcps_sc_added);
}

/*
 * Remove and free entry from syncache bucket row.
 * Expects locked syncache head.
 */
static void
syncache_drop(struct syncache *sc, struct syncache_head *sch)
{

	SCH_LOCK_ASSERT(sch);

	TCPSTATES_DEC(TCPS_SYN_RECEIVED);
	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;

#ifdef TCP_OFFLOAD
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_syncache_removed(tod, sc->sc_todctx);
	}
#endif

	syncache_free(sc);
}

/*
 * Engage/reengage timer on bucket row.
 */
static void
syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
{
	sc->sc_rxttime = ticks +
		TCPTV_RTOBASE * (tcp_syn_backoff[sc->sc_rxmits]);
	sc->sc_rxmits++;
	if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
		sch->sch_nextc = sc->sc_rxttime;
		if (docallout)
			callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
			    syncache_timer, (void *)sch);
	}
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 * One separate timer for each bucket row.
 */
static void
syncache_timer(void *xsch)
{
	struct syncache_head *sch = (struct syncache_head *)xsch;
	struct syncache *sc, *nsc;
	int tick = ticks;
	char *s;

	CURVNET_SET(sch->sch_sc->vnet);

	/* NB: syncache_head has already been locked by the callout. */
	SCH_LOCK_ASSERT(sch);

	/*
	 * In the following cycle we may remove some entries and/or
	 * advance some timeouts, so re-initialize the bucket timer.
	 */
	sch->sch_nextc = tick + INT_MAX;

	TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
		/*
		 * We do not check if the listen socket still exists
		 * and accept the case where the listen socket may be
		 * gone by the time we resend the SYN/ACK.  We do
		 * not expect this to happen often.  If it does,
		 * then the RST will be sent by the time the remote
		 * host does the SYN/ACK->ACK.
		 */
		if (TSTMP_GT(sc->sc_rxttime, tick)) {
			if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
				sch->sch_nextc = sc->sc_rxttime;
			continue;
		}
		if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
				    "giving up and removing syncache entry\n",
				    s, __func__);
				free(s, M_TCPLOG);
			}
			syncache_drop(sc, sch);
			TCPSTAT_INC(tcps_sc_stale);
			continue;
		}
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Response timeout, "
			    "retransmitting (%u) SYN|ACK\n",
			    s, __func__, sc->sc_rxmits);
			free(s, M_TCPLOG);
		}

		syncache_respond(sc, sch, 1, NULL);
		TCPSTAT_INC(tcps_sc_retransmitted);
		syncache_timeout(sc, sch, 0);
	}
	if (!TAILQ_EMPTY(&(sch)->sch_bucket))
		callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
		    syncache_timer, (void *)(sch));
	CURVNET_RESTORE();
}

/*
 * Find an entry in the syncache.
 * Always returns with a locked syncache_head plus a matching entry or NULL.
 */
static struct syncache *
syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
{
	struct syncache *sc;
	struct syncache_head *sch;
	uint32_t hash;

	/*
	 * The hash is built on foreign port + local port + foreign address.
	 * We rely on the fact that struct in_conninfo starts with 16 bits
	 * of foreign port, then 16 bits of local port, followed by 128
	 * bits of foreign address.  In case of an IPv4 address, the first 3
	 * 32-bit words of the address are always zeroes.
	 */
	hash = jenkins_hash32((uint32_t *)&inc->inc_ie, 5,
	    V_tcp_syncache.hash_secret) & V_tcp_syncache.hashmask;

	sch = &V_tcp_syncache.hashbase[hash];
	*schp = sch;
	SCH_LOCK(sch);

	/* Circle through bucket row to find matching entry. */
	TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash)
		if (bcmp(&inc->inc_ie, &sc->sc_inc.inc_ie,
		    sizeof(struct in_endpoints)) == 0)
			break;

	return (sc);	/* Always returns with locked sch. */
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
{
	struct syncache *sc;
	struct syncache_head *sch;
	char *s = NULL;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);

	/*
	 * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
	 * See RFC 793 page 65, section SEGMENT ARRIVES.
	 */
	if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
			    "FIN flag set, segment ignored\n", s, __func__);
		TCPSTAT_INC(tcps_badrst);
		goto done;
	}

	/*
	 * No corresponding connection was found in syncache.
	 * If syncookies are enabled and possibly exclusively
	 * used, or we are under memory pressure, a valid RST
	 * may not find a syncache entry.  In that case we're
	 * done and no SYN|ACK retransmissions will happen.
	 * Otherwise the RST was misdirected or spoofed.
	 */
	if (sc == NULL) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
			    "syncache entry (possibly syncookie only), "
			    "segment ignored\n", s, __func__);
		TCPSTAT_INC(tcps_badrst);
		goto done;
	}

	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
			    "connection attempt aborted by remote endpoint\n",
			    s, __func__);
		TCPSTAT_INC(tcps_sc_reset);
	} else {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
			    "IRS %u (+WND %u), segment ignored\n",
			    s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
		TCPSTAT_INC(tcps_badrst);
	}

done:
	if (s != NULL)
		free(s, M_TCPLOG);
	SCH_UNLOCK(sch);
}

void
syncache_badack(struct in_conninfo *inc)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		TCPSTAT_INC(tcps_sc_badack);
	}
	SCH_UNLOCK(sch);
}

void
syncache_unreach(struct in_conninfo *inc, tcp_seq th_seq)
{
	struct syncache *sc;
	struct syncache_head *sch;

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);
	if (sc == NULL)
		goto done;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th_seq) != sc->sc_iss)
		goto done;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
		sc->sc_flags |= SCF_UNREACH;
		goto done;
	}
	syncache_drop(sc, sch);
	TCPSTAT_INC(tcps_sc_unreach);
done:
	SCH_UNLOCK(sch);
}

/*
 * Build a new TCP socket structure from a syncache entry.
 *
 * On success return the newly created socket with its underlying inp locked.
 */
static struct socket *
syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
{
	struct tcp_function_block *blk;
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;
	int error;
	char *s;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, 0);
	if (so == NULL) {
		/*
		 * Drop the connection; we will either send a RST or
		 * have the peer retransmit its SYN again after its
		 * RTO and try again.
		 */
		TCPSTAT_INC(tcps_listendrop);
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Socket create failed "
			    "due to limits or memory shortage\n",
			    s, __func__);
			free(s, M_TCPLOG);
		}
		goto abort2;
	}
#ifdef MAC
	mac_socketpeer_set_from_mbuf(m, so);
#endif

	inp = sotoinpcb(so);
	inp->inp_inc.inc_fibnum = so->so_fibnum;
	INP_WLOCK(inp);
	/*
	 * Exclusive pcbinfo lock is not required in syncache socket case even
	 * if two inpcb locks can be acquired simultaneously:
	 * - the inpcb in LISTEN state,
	 * - the newly created inp.
	 *
	 * In this case, an inp cannot be at the same time in LISTEN state and
	 * just created by an accept() call.
	 */
	INP_HASH_WLOCK(&V_tcbinfo);

	/* Insert new socket into PCB hash list. */
	inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif

	/*
	 * If there's an mbuf and it has a flowid, then let's initialise the
	 * inp with that particular flowid.
	 */
	if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}

	/*
	 * Install in the reservation hash table for now, but don't yet
	 * install a connection group since the full 4-tuple isn't yet
	 * configured.
	 */
	inp->inp_lport = sc->sc_inc.inc_lport;
	if ((error = in_pcbinshash_nopcbgroup(inp)) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_flags & INC_ISIPV6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
			    "with error %i\n",
			    s, __func__, error);
			free(s, M_TCPLOG);
		}
		INP_HASH_WUNLOCK(&V_tcbinfo);
		goto abort;
	}
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if ((error = in6_pcbconnect_mbuf(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred, m)) != 0) {
			inp->in6p_laddr = laddr6;
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
				    "with error %i\n",
				    s, __func__, error);
				free(s, M_TCPLOG);
			}
			INP_HASH_WUNLOCK(&V_tcbinfo);
			goto abort;
		}
		/* Override flowlabel from in6_pcbconnect. */
		inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
		inp->inp_flow |= sc->sc_flowlabel;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = (m) ? ip_srcroute(m) : NULL;

		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if ((error = in_pcbconnect_mbuf(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred, m)) != 0) {
			inp->inp_laddr = laddr;
			if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
				    "with error %i\n",
				    s, __func__, error);
				free(s, M_TCPLOG);
			}
			INP_HASH_WUNLOCK(&V_tcbinfo);
			goto abort;
		}
	}
#endif /* INET */
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* Copy old policy into new socket's. */
	if (ipsec_copy_pcbpolicy(sotoinpcb(lso), inp) != 0)
		printf("syncache_socket: could not copy policy\n");
#endif
	INP_HASH_WUNLOCK(&V_tcbinfo);
	tp = intotcpcb(inp);
	tcp_state_change(tp, TCPS_SYN_RECEIVED);
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	blk = sototcpcb(lso)->t_fb;
	if (V_functions_inherit_listen_socket_stack && blk != tp->t_fb) {
		/*
		 * Our parent's t_fb was not the default; we need to release
		 * our ref on tp->t_fb and pick up one on the new entry.
		 */
		struct tcp_function_block *rblk;

		rblk = find_and_ref_tcp_fb(blk);
		KASSERT(rblk != NULL,
		    ("cannot find blk %p out of syncache?", blk));
		if (tp->t_fb->tfb_tcp_fb_fini)
			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
		refcount_release(&tp->t_fb->tfb_refcnt);
		tp->t_fb = rblk;
		if (tp->t_fb->tfb_tcp_fb_init) {
			(*tp->t_fb->tfb_tcp_fb_init)(tp);
		}
	}
	tp->snd_wl1 = sc->sc_irs;
	tp->snd_max = tp->iss + 1;
	tp->snd_nxt = tp->iss + 1;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	else {
		if (sc->sc_flags & SCF_WINSCALE) {
			tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
			tp->snd_scale = sc->sc_requested_s_scale;
			tp->request_r_scale = sc->sc_requested_r_scale;
		}
		if (sc->sc_flags & SCF_TIMESTAMP) {
			tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
			tp->ts_recent = sc->sc_tsreflect;
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_offset = sc->sc_tsoff;
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (sc->sc_flags & SCF_SIGNATURE)
			tp->t_flags |= TF_SIGNATURE;
#endif
		if (sc->sc_flags & SCF_SACK)
			tp->t_flags |= TF_SACK_PERMIT;
	}

	if (sc->sc_flags & SCF_ECN)
		tp->t_flags |= TF_ECN_PERMIT;

	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, indicate that the CWND is to be
	 * limited to one segment in cc_conn_init().
	 * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
	 */
	if (sc->sc_rxmits > 1)
		tp->snd_cwnd = 1;

#ifdef TCP_OFFLOAD
	/*
	 * Allow a TOE driver to install its hooks.  Note that we hold the
	 * pcbinfo lock too and that prevents tcp_usr_accept from accepting a
	 * new connection before the TOE driver has done its thing.
	 */
	if (ADDED_BY_TOE(sc)) {
		struct toedev *tod = sc->sc_tod;

		tod->tod_offload_socket(tod, sc->sc_todctx, so);
	}
#endif
	/*
	 * Copy and activate timers.
	 */
	tp->t_keepinit = sototcpcb(lso)->t_keepinit;
	tp->t_keepidle = sototcpcb(lso)->t_keepidle;
	tp->t_keepintvl = sototcpcb(lso)->t_keepintvl;
	tp->t_keepcnt = sototcpcb(lso)->t_keepcnt;
	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));

	TCPSTAT_INC(tcps_accepts);
	return (so);

abort:
	INP_WUNLOCK(inp);
abort2:
	if (so != NULL)
		soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 *
 * On syncache_socket() success the newly created socket
 * has its underlying inp locked.
 */
int
syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct socket **lsop, struct mbuf *m)
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct syncache scs;
	char *s;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
	    ("%s: can handle only ACK", __func__));

	sc = syncache_lookup(inc, &sch);	/* returns locked sch */
	SCH_LOCK_ASSERT(sch);

#ifdef INVARIANTS
	/*
	 * Test code for syncookies comparing the syncache stored
	 * values with the reconstructed values from the cookie.
	 */
	if (sc != NULL)
		syncookie_cmp(inc, sch, sc, th, to, *lsop);
#endif

	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. Check if syncookies are used in case of syncache
		 *     overflows
		 *  B. See if this socket has had a syncache entry dropped in
		 *     the recent past.  We don't want to accept a bogus
		 *     syncookie if we've never received a SYN or accept it
		 *     twice.
		 *  C. Check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!V_tcp_syncookies) {
			SCH_UNLOCK(sch);
			if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Spurious ACK, "
				    "segment rejected (syncookies disabled)\n",
				    s, __func__);
			goto failed;
		}
		if (!V_tcp_syncookiesonly &&
		    sch->sch_last_overflow < time_uptime - SYNCOOKIE_LIFETIME) {
			SCH_UNLOCK(sch);
			if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Spurious ACK, "
				    "segment rejected (no syncache entry)\n",
				    s, __func__);
			goto failed;
		}
		bzero(&scs, sizeof(scs));
		sc = syncookie_lookup(inc, sch, &scs, th, to, *lsop);
		SCH_UNLOCK(sch);
		if (sc == NULL) {
			if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Segment failed "
				    "SYNCOOKIE authentication, segment rejected "
				    "(probably spoofed)\n", s, __func__);
			goto failed;
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* If received ACK has MD5 signature, check it. */
		if ((to->to_flags & TOF_SIGNATURE) != 0 &&
		    (!TCPMD5_ENABLED() ||
		    TCPMD5_INPUT(m, th, to->to_signature) != 0)) {
			/* Drop the ACK. */
			if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: Segment rejected, "
				    "MD5 signature doesn't match.\n",
				    s, __func__);
				free(s, M_TCPLOG);
			}
			TCPSTAT_INC(tcps_sig_err_sigopt);
			return (-1); /* Do not send RST */
		}
#endif /* TCP_SIGNATURE */
	} else {
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/*
		 * If listening socket requested TCP digests, check that
		 * received ACK has signature and it is correct.
		 * If not, drop the ACK and leave sc entry in the cache,
		 * because SYN was received with correct signature.
		 */
		if (sc->sc_flags & SCF_SIGNATURE) {
			if ((to->to_flags & TOF_SIGNATURE) == 0) {
				/* No signature */
				TCPSTAT_INC(tcps_sig_err_nosigopt);
				SCH_UNLOCK(sch);
				if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
					log(LOG_DEBUG, "%s; %s: Segment "
					    "rejected, MD5 signature wasn't "
					    "provided.\n", s, __func__);
					free(s, M_TCPLOG);
				}
				return (-1); /* Do not send RST */
			}
			if (!TCPMD5_ENABLED() ||
			    TCPMD5_INPUT(m, th, to->to_signature) != 0) {
				/* Doesn't match or no SA */
				SCH_UNLOCK(sch);
				if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
					log(LOG_DEBUG, "%s; %s: Segment "
					    "rejected, MD5 signature doesn't "
					    "match.\n", s, __func__);
					free(s, M_TCPLOG);
				}
				return (-1); /* Do not send RST */
			}
		}
#endif /* TCP_SIGNATURE */
		/*
		 * Pull out the entry to unlock the bucket row.
		 *
		 * NOTE: We must decrease TCPS_SYN_RECEIVED count here, not
		 * tcp_state_change().  The tcpcb does not exist at this
		 * moment.  A new one will be allocated via syncache_socket->
		 * sonewconn->tcp_usr_attach in TCPS_CLOSED state, then
		 * syncache_socket() will change it to TCPS_SYN_RECEIVED.
		 */
		TCPSTATES_DEC(TCPS_SYN_RECEIVED);
		TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
		sch->sch_length--;
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			tod->tod_syncache_removed(tod, sc->sc_todctx);
		}
#endif
		SCH_UNLOCK(sch);
	}

	/*
	 * Segment validation:
	 * ACK must match our initial sequence number + 1 (the SYN|ACK).
	 */
	if (th->th_ack != sc->sc_iss + 1) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
			    "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
		goto failed;
	}

	/*
	 * The SEQ must fall in the window starting at the received
	 * initial receive sequence number + 1 (the SYN).
	 */
	if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
	    SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
			    "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
		goto failed;
	}

	/*
	 * If timestamps were not negotiated during SYN/ACK they
	 * must not appear on any segment during this session.
	 */
	if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "segment rejected\n", s, __func__);
		goto failed;
	}

	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session.
	 * XXXAO: This is only informal as there have been unverified
	 * reports of non-compliant stacks.
	 */
	if ((sc->sc_flags & SCF_TIMESTAMP) && !(to->to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
			s = NULL;
		}
	}

	/*
	 * If timestamps were negotiated, the reflected timestamp
	 * must be equal to what we actually sent in the SYN|ACK
	 * except in the case of 0.  Some boxes are known for sending
	 * broken timestamp replies during the 3whs (and potentially
	 * during the connection also).
	 *
	 * Accept the final ACK of 3whs with reflected timestamp of 0
	 * instead of sending a RST and deleting the syncache entry.
	 */
	if ((to->to_flags & TOF_TS) && to->to_tsecr &&
	    to->to_tsecr != sc->sc_ts) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
			log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
			    "segment rejected\n",
			    s, __func__, to->to_tsecr, sc->sc_ts);
		goto failed;
	}

	*lsop = syncache_socket(sc, *lsop, m);

	if (*lsop == NULL)
		TCPSTAT_INC(tcps_sc_aborted);
	else
		TCPSTAT_INC(tcps_sc_completed);

	/* how do we find the inp for the new socket? */
	if (sc != &scs)
		syncache_free(sc);
	return (1);
failed:
	if (sc != NULL && sc != &scs)
		syncache_free(sc);
	if (s != NULL)
		free(s, M_TCPLOG);
	*lsop = NULL;
	return (0);
}

#ifdef TCP_RFC7413
static void
syncache_tfo_expand(struct syncache *sc, struct socket **lsop, struct mbuf *m,
    uint64_t response_cookie)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	unsigned int *pending_counter;

	/*
	 * Global TCP locks are held because we manipulate the PCB lists
	 * and create a new socket.
	 */
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	pending_counter = intotcpcb(sotoinpcb(*lsop))->t_tfo_pending;
	*lsop = syncache_socket(sc, *lsop, m);
	if (*lsop == NULL) {
		TCPSTAT_INC(tcps_sc_aborted);
		atomic_subtract_int(pending_counter, 1);
	} else {
		soisconnected(*lsop);
		inp = sotoinpcb(*lsop);
		tp = intotcpcb(inp);
		tp->t_flags |= TF_FASTOPEN;
		tp->t_tfo_cookie.server = response_cookie;
		tp->snd_max = tp->iss;
		tp->snd_nxt = tp->iss;
		tp->t_tfo_pending = pending_counter;
		TCPSTAT_INC(tcps_sc_completed);
	}
}
#endif /* TCP_RFC7413 */

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 *
 * The exception to the above is when a SYN with a valid TCP Fast Open (TFO)
 * cookie is processed and a new socket is created.  In this case, any data
 * accompanying the SYN will be queued to the socket by tcp_input() and will
 * be ACKed either when the application sends response data or the delayed
 * ACK timer expires, whichever comes first.
 */
int
syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
    struct inpcb *inp, struct socket **lsop, struct mbuf *m, void *tod,
    void *todctx)
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	u_int ltflags;
	int win, ip_ttl, ip_tos;
	char *s;
	int rv = 0;
#ifdef INET6
	int autoflowlabel = 0;
#endif
#ifdef MAC
	struct label *maclabel;
#endif
	struct syncache scs;
	struct ucred *cred;
#ifdef TCP_RFC7413
	uint64_t tfo_response_cookie;
	unsigned int *tfo_pending = NULL;
	int tfo_cookie_valid = 0;
	int tfo_response_cookie_valid = 0;
#endif

	INP_WLOCK_ASSERT(inp);			/* listen socket */
	KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
	    ("%s: unexpected tcp flags", __func__));

	/*
	 * Combine all so/tp operations very early to drop the INP lock as
	 * soon as possible.
	 */
	so = *lsop;
	KASSERT(SOLISTENING(so), ("%s: %p not listening", __func__, so));
	tp = sototcpcb(so);
	cred = crhold(so->so_cred);

#ifdef INET6
	if ((inc->inc_flags & INC_ISIPV6) &&
	    (inp->inp_flags & IN6P_AUTOFLOWLABEL))
		autoflowlabel = 1;
#endif
	ip_ttl = inp->inp_ip_ttl;
	ip_tos = inp->inp_ip_tos;
	win = so->sol_sbrcv_hiwat;
	ltflags = (tp->t_flags & (TF_NOOPT | TF_SIGNATURE));

#ifdef TCP_RFC7413
	if (V_tcp_fastopen_server_enable && IS_FASTOPEN(tp->t_flags) &&
	    (tp->t_tfo_pending != NULL) &&
	    (to->to_flags & TOF_FASTOPEN)) {
		/*
		 * Limit the number of pending TFO connections to
		 * approximately half of the queue limit.  This prevents TFO
		 * SYN floods from starving the service by filling the
		 * listen queue with bogus TFO connections.
		 */
		if (atomic_fetchadd_int(tp->t_tfo_pending, 1) <=
		    (so->sol_qlimit / 2)) {
			int result;

			result = tcp_fastopen_check_cookie(inc,
			    to->to_tfo_cookie, to->to_tfo_len,
			    &tfo_response_cookie);
			tfo_cookie_valid = (result > 0);
			tfo_response_cookie_valid = (result >= 0);
		}

		/*
		 * Remember the TFO pending counter as it will have to be
		 * decremented below if we don't make it to
		 * syncache_tfo_expand().
		 */
		tfo_pending = tp->t_tfo_pending;
	}
#endif

	/* By the time we drop the lock these should no longer be used. */
	so = NULL;
	tp = NULL;

#ifdef MAC
	if (mac_syncache_init(&maclabel) != 0) {
		INP_WUNLOCK(inp);
		goto done;
	} else
		mac_syncache_create(maclabel, inp);
#endif
#ifdef TCP_RFC7413
	if (!tfo_cookie_valid)
#endif
		INP_WUNLOCK(inp);

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!(inc->inc_flags & INC_ISIPV6))
#endif
#ifdef INET
		ipopts = (m) ? ip_srcroute(m) : NULL;
#else
		ipopts = NULL;
#endif

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	/*
	 * If listening socket requested TCP digests, check that received
	 * SYN has signature and it is correct.  If signature doesn't match
	 * or TCP_SIGNATURE support isn't enabled, drop the packet.
	 */
	if (ltflags & TF_SIGNATURE) {
		if ((to->to_flags & TOF_SIGNATURE) == 0) {
			TCPSTAT_INC(tcps_sig_err_nosigopt);
			goto done;
		}
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_INPUT(m, th, to->to_signature) != 0)
			goto done;
	}
#endif /* TCP_SIGNATURE */
	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX: should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 *
	 * XXX: We do not check the sequence number to see if this is a
	 * real retransmit or a new connection attempt.  The question is
	 * how to handle such a case; either ignore it as spoofed, or
	 * drop the current entry and create a new one?
	 */
	sc = syncache_lookup(inc, &sch);	/* returns locked entry */
	SCH_LOCK_ASSERT(sch);
	if (sc != NULL) {
#ifdef TCP_RFC7413
		if (tfo_cookie_valid)
			INP_WUNLOCK(inp);
#endif
		TCPSTAT_INC(tcps_sc_dupsyn);
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
			sc->sc_tsreflect = to->to_tsval;
		else
			sc->sc_flags &= ~SCF_TIMESTAMP;
#ifdef MAC
		/*
		 * Since we have already unconditionally allocated label
		 * storage, free it up.  The syncache entry will already
		 * have an initialized label we can use.
		 */
		mac_syncache_destroy(&maclabel);
#endif
		/* Retransmit SYN|ACK and reset retransmit count. */
		if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
			    "resetting timer and retransmitting SYN|ACK\n",
			    s, __func__);
			free(s, M_TCPLOG);
		}
		if (syncache_respond(sc, sch, 1, m) == 0) {
			sc->sc_rxmits = 0;
			syncache_timeout(sc, sch, 1);
			TCPSTAT_INC(tcps_sndacks);
			TCPSTAT_INC(tcps_sndtotal);
		}
		SCH_UNLOCK(sch);
		goto done;
	}

#ifdef TCP_RFC7413
	if (tfo_cookie_valid) {
		bzero(&scs, sizeof(scs));
		sc = &scs;
		goto skip_alloc;
	}
#endif

	sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		TCPSTAT_INC(tcps_sc_zonefail);
		if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL) {
			sch->sch_last_overflow = time_uptime;
			syncache_drop(sc, sch);
		}
		sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
		if (sc == NULL) {
			if (V_tcp_syncookies) {
				bzero(&scs, sizeof(scs));
				sc = &scs;
			} else {
				SCH_UNLOCK(sch);
				if (ipopts)
					(void) m_free(ipopts);
				goto done;
			}
		}
	}

#ifdef TCP_RFC7413
skip_alloc:
	if (!tfo_cookie_valid && tfo_response_cookie_valid)
		sc->sc_tfo_cookie = &tfo_response_cookie;
#endif

	/*
	 * Fill in the syncache values.
	 */
#ifdef MAC
	sc->sc_label = maclabel;
#endif
	sc->sc_cred = cred;
	cred = NULL;
	sc->sc_ipopts = ipopts;
	bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
#ifdef INET6
	if (!(inc->inc_flags & INC_ISIPV6))
#endif
	{
		sc->sc_ip_tos = ip_tos;
		sc->sc_ip_ttl = ip_ttl;
	}
#ifdef TCP_OFFLOAD
	sc->sc_tod = tod;
	sc->sc_todctx = todctx;
#endif
	sc->sc_irs = th->th_seq;
	sc->sc_iss = arc4random();
	sc->sc_flags = 0;
	sc->sc_flowlabel = 0;

	/*
	 * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
	 * win was derived from socket earlier in the function.
	 */
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (V_tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsreflect = to->to_tsval;
			sc->sc_ts = tcp_ts_getticks();
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/*
			 * Pick the smallest possible scaling factor that
			 * will still allow us to scale up to sb_max, aka
			 * kern.ipc.maxsockbuf.
			 *
			 * We do this because there are broken firewalls that
			 * will corrupt the window scale option, leading to
			 * the other endpoint believing that our advertised
			 * window is unscaled.  At scale factors larger than
			 * 5 the unscaled window will drop below 1500 bytes,
			 * leading to serious problems when traversing these
			 * broken firewalls.
			 *
			 * With the default maxsockbuf of 256K, a scale factor
			 * of 3 will be chosen by this algorithm.  Those who
			 * choose a larger maxsockbuf should watch out
			 * for the compatibility problems mentioned above.
			 *
			 * RFC1323: The Window field in a SYN (i.e., a <SYN>
			 * or <SYN,ACK>) segment itself is never scaled.
			 */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < sb_max)
				wscale++;
			sc->sc_requested_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_wscale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	/*
	 * If listening socket requested TCP digests, flag this in the
	 * syncache so that syncache_respond() will do the right thing
	 * with the SYN+ACK.
	 */
	if (ltflags & TF_SIGNATURE)
		sc->sc_flags |= SCF_SIGNATURE;
#endif	/* TCP_SIGNATURE */
	if (to->to_flags & TOF_SACKPERM)
		sc->sc_flags |= SCF_SACK;
	if (to->to_flags & TOF_MSS)
		sc->sc_peer_mss = to->to_mss;	/* peer mss may be zero */
	if (ltflags & TF_NOOPT)
		sc->sc_flags |= SCF_NOOPT;
	if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
		sc->sc_flags |= SCF_ECN;

	if (V_tcp_syncookies)
		sc->sc_iss = syncookie_generate(sch, sc);
#ifdef INET6
	if (autoflowlabel) {
		if (V_tcp_syncookies)
			sc->sc_flowlabel = sc->sc_iss;
		else
			sc->sc_flowlabel = ip6_randomflowlabel();
		sc->sc_flowlabel = htonl(sc->sc_flowlabel) & IPV6_FLOWLABEL_MASK;
	}
#endif
	SCH_UNLOCK(sch);

#ifdef TCP_RFC7413
	if (tfo_cookie_valid) {
		syncache_tfo_expand(sc, lsop, m, tfo_response_cookie);
		/* INP_WUNLOCK(inp) will be performed by the caller */
		rv = 1;
		goto tfo_expanded;
	}
#endif

	/*
	 * Do a standard 3-way handshake.
	 */
	if (syncache_respond(sc, sch, 0, m) == 0) {
		if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
			syncache_free(sc);
		else if (sc != &scs)
			syncache_insert(sc, sch);	/* locks and unlocks sch */
		TCPSTAT_INC(tcps_sndacks);
		TCPSTAT_INC(tcps_sndtotal);
	} else {
		if (sc != &scs)
			syncache_free(sc);
		TCPSTAT_INC(tcps_sc_dropped);
	}

done:
	if (m) {
		*lsop = NULL;
		m_freem(m);
	}
#ifdef TCP_RFC7413
	/*
	 * If tfo_pending is not NULL here, then a TFO SYN that did not
	 * result in a new socket was processed and the associated pending
	 * counter has not yet been decremented.  All such TFO processing paths
	 * transit this point.
	 */
	if (tfo_pending != NULL)
		tcp_fastopen_decrement_counter(tfo_pending);

tfo_expanded:
#endif
	if (cred != NULL)
		crfree(cred);
#ifdef MAC
	if (sc == &scs)
		mac_syncache_destroy(&maclabel);
#endif
	return (rv);
}

/*
 * Send SYN|ACK to the peer.  Either in response to the peer's SYN,
 * i.e. m0 != NULL, or upon 3WHS ACK timeout, i.e. m0 == NULL.
 */
static int
syncache_respond(struct syncache *sc, struct syncache_head *sch, int locked,
    const struct mbuf *m0)
{
	struct ip *ip = NULL;
	struct mbuf *m;
	struct tcphdr *th = NULL;
	int optlen, error = 0;	/* Make compiler happy */
	u_int16_t hlen, tlen, mssopt;
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif
	hlen =
#ifdef INET6
	       (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
#endif
		sizeof(struct ip);
	tlen = hlen + sizeof(struct tcphdr);

	/* Determine MSS we advertise to other end of connection. */
	mssopt = max(tcp_mssopt(&sc->sc_inc), V_tcp_minmss);

	/* XXX: Assume that the entire packet will fit in a header mbuf. */
	KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
	    ("syncache: mbuf too small"));

	/* Create the IP+TCP header from scratch. */
	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
#ifdef MAC
	mac_syncache_create_mbuf(sc->sc_label, m);
#endif
	m->m_data += max_linkhdr;
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;

#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_src = sc->sc_inc.inc6_laddr;
		ip6->ip6_dst = sc->sc_inc.inc6_faddr;
		ip6->ip6_plen = htons(tlen - hlen);
		/* ip6_hlim is set after checksum */
		ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
		ip6->ip6_flow |= sc->sc_flowlabel;

		th = (struct tcphdr *)(ip6 + 1);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_len = htons(tlen);
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = sc->sc_inc.inc_laddr;
		ip->ip_dst = sc->sc_inc.inc_faddr;
		ip->ip_ttl = sc->sc_ip_ttl;
		ip->ip_tos = sc->sc_ip_tos;

		/*
		 * See if we should do MTU discovery.  Route lookups are
		 * expensive, so we will only unset the DF bit if:
		 *
		 *	1) path_mtu_discovery is disabled
		 *	2) the SCF_UNREACH flag has been set
		 */
		if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
			ip->ip_off |= htons(IP_DF);

		th = (struct tcphdr *)(ip + 1);
	}
#endif /* INET */
	th->th_sport = sc->sc_inc.inc_lport;
	th->th_dport = sc->sc_inc.inc_fport;

	th->th_seq = htonl(sc->sc_iss);
	th->th_ack = htonl(sc->sc_irs + 1);
	th->th_off = sizeof(struct tcphdr) >> 2;
	th->th_x2 = 0;
	th->th_flags = TH_SYN|TH_ACK;
	th->th_win = htons(sc->sc_wnd);
	th->th_urp = 0;

	if (sc->sc_flags & SCF_ECN) {
		th->th_flags |= TH_ECE;
		TCPSTAT_INC(tcps_ecn_shs);
	}

	/* Tack on the TCP options. */
	if ((sc->sc_flags & SCF_NOOPT) == 0) {
		to.to_flags = 0;

		to.to_mss = mssopt;
		to.to_flags = TOF_MSS;
		if (sc->sc_flags & SCF_WINSCALE) {
			to.to_wscale = sc->sc_requested_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		if (sc->sc_flags & SCF_TIMESTAMP) {
			/* Virgin timestamp or TCP cookie enhanced one. */
			to.to_tsval = sc->sc_ts;
			to.to_tsecr = sc->sc_tsreflect;
			to.to_flags |= TOF_TS;
		}
		if (sc->sc_flags & SCF_SACK)
			to.to_flags |= TOF_SACKPERM;
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (sc->sc_flags & SCF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif
#ifdef TCP_RFC7413
		if (sc->sc_tfo_cookie) {
			to.to_flags |= TOF_FASTOPEN;
			to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
			to.to_tfo_cookie = sc->sc_tfo_cookie;
			/* don't send cookie again when retransmitting response */
			sc->sc_tfo_cookie = NULL;
		}
#endif
		optlen = tcp_addoptions(&to, (u_char *)(th + 1));

		/* Adjust headers by option size. */
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
		m->m_len += optlen;
		m->m_pkthdr.len += optlen;
#ifdef INET6
		if (sc->sc_inc.inc_flags & INC_ISIPV6)
			ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
		else
#endif
			ip->ip_len = htons(ntohs(ip->ip_len) + optlen);
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		if (sc->sc_flags & SCF_SIGNATURE) {
			KASSERT(to.to_flags & TOF_SIGNATURE,
			    ("tcp_addoptions() didn't set tcp_signature"));

			/* NOTE: to.to_signature is inside of mbuf */
			if (!TCPMD5_ENABLED() ||
			    TCPMD5_OUTPUT(m, th, to.to_signature) != 0) {
				m_freem(m);
				return (EACCES);
			}
		}
#endif
	} else
		optlen = 0;

	M_SETFIB(m, sc->sc_inc.inc_fibnum);
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	/*
	 * If we have peer's SYN and it has a flowid, then let's assign it to
	 * our SYN|ACK.  ip6_output() and ip_output() will not assign flowid
	 * to SYN|ACK due to lack of inp here.
	 */
	if (m0 != NULL && M_HASHTYPE_GET(m0) != M_HASHTYPE_NONE) {
		m->m_pkthdr.flowid = m0->m_pkthdr.flowid;
		M_HASHTYPE_SET(m, M_HASHTYPE_GET(m0));
	}
#ifdef INET6
	if (sc->sc_inc.inc_flags & INC_ISIPV6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6, tlen + optlen - hlen,
		    IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);

			return (error);
		}
#endif
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen + optlen - hlen + IPPROTO_TCP));
#ifdef TCP_OFFLOAD
		if (ADDED_BY_TOE(sc)) {
			struct toedev *tod = sc->sc_tod;

			error = tod->tod_syncache_respond(tod, sc->sc_todctx, m);

			return (error);
		}
#endif
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
	}
#endif
	return (error);
}

/*
 * The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
 * that exceed the capacity of the syncache by avoiding the storage of any
 * of the SYNs we receive.  Syncookies defend against blind SYN flooding
 * attacks where the attacker does not have access to our responses.
 *
 * Syncookies encode and include all necessary information about the
 * connection setup within the SYN|ACK that we send back.  That way we
 * can avoid keeping any local state until the ACK to our SYN|ACK returns
 * (if ever).  Normally the syncache and syncookies are running in parallel
 * with the latter taking over when the former is exhausted.  When a matching
 * syncache entry is found the syncookie is ignored.
 *
 * The only reliable information persisting across the 3WHS is our initial
 * sequence number ISS of 32 bits.  Syncookies embed a cryptographically
 * sufficiently strong hash (MAC) value and a few bits of TCP SYN options in
 * the ISS of our SYN|ACK.  The MAC can be recomputed when the ACK to our
 * SYN|ACK returns and signifies a legitimate connection if it matches the ACK.
 *
 * The available space of 32 bits to store the hash and to encode the SYN
 * option information is very tight and we should have at least 24 bits for
 * the MAC to keep the number of guesses by blind spoofing reasonably high.
 *
 * SYN option information we have to encode to fully restore a connection:
 * MSS: is important to choose an optimal segment size to avoid IP level
 *   fragmentation along the path.  The common MSS values can be encoded
 *   in a 3-bit table.  Uncommon values are captured by the next lower value
 *   in the table leading to a slight increase in packetization overhead.
 * WSCALE: is necessary to allow large windows to be used for high delay-
 *   bandwidth product links.  Not scaling the window when it was initially
 *   negotiated is bad for performance as lack of scaling further decreases
 *   the apparent available send window.  We only need to encode the WSCALE
 *   we received from the remote end.  Our end can be recalculated at any
 *   time.  The common WSCALE values can be encoded in a 3-bit table.
1866 * Uncommon values are captured by the next lower value in the table 1867 * making us under-estimate the available window size halving our 1868 * theoretically possible maximum throughput for that connection. 1869 * SACK: Greatly assists in packet loss recovery and requires 1 bit. 1870 * TIMESTAMP and SIGNATURE are not encoded because they are permanent options 1871 * that are included in all segments on a connection. We enable them when 1872 * the ACK has them. 1873 * 1874 * Security of syncookies and attack vectors: 1875 *
1876 * The MAC is computed over (faddr||laddr||fport||lport||irs||flags||secmod) 1877 * together with the global secret to make it unique per connection attempt. 1878 * Thus any change of any of those parameters results in a different MAC output 1879 * in an unpredictable way unless a collision is encountered. 24 bits of the 1880 * MAC are embedded into the ISS. 1881 * 1882 * To prevent replay attacks two rotating global secrets are updated with a 1883 * new random value every 15 seconds. The life-time of a syncookie is thus 1884 * 15-30 seconds. 1885 *
1886 * Vector 1: Attacking the secret. This requires finding a weakness in the 1887 * MAC itself or the way it is used here. The attacker can do a chosen plain 1888 * text attack by varying and testing all the parameters under his control. 1889 * The strength depends on the size and randomness of the secret, and the 1890 * cryptographic security of the MAC function. Due to the constant updating 1891 * of the secret the attacker has at most 29.999 seconds to find the secret 1892 * and launch spoofed connections. After that he has to start all over again. 1893 *
1894 * Vector 2: Collision attack on the MAC of a single ACK. With a 24 bit MAC 1895 * size an average of 4,823 attempts are required for a 50% chance of success 1896 * to spoof a single syncookie (birthday collision paradox). However the 1897 * attacker is blind and doesn't know if one of his attempts succeeded unless 1898 * he has a side channel to infer success from. A single connection setup 1899 * success probability of 90% requires 8,790 packets, 99.99% requires 17,578 packets. 1900 * This many attempts are required for each blind spoofed connection. For 1901 * every additional spoofed connection he has to launch another N attempts. 1902 * Thus for a sustained rate of 100 spoofed connections per second approximately 1903 * 1,800,000 packets per second would have to be sent. 1904 *
1905 * NB: The MAC function should be fast so that it doesn't become a CPU 1906 * exhaustion attack vector itself. 1907 * 1908 * References: 1909 * RFC4987 TCP SYN Flooding Attacks and Common Mitigations 1910 * SYN cookies were first proposed by cryptographer Dan J. Bernstein in 1996 1911 * http://cr.yp.to/syncookies.html (overview) 1912 * http://cr.yp.to/syncookies/archive (details) 1913 * 1914 *
1915 * Schematic construction of a syncookie enabled Initial Sequence Number: 1916 * 0 1 2 3 1917 * 12345678901234567890123456789012 1918 * |xxxxxxxxxxxxxxxxxxxxxxxxWWWMMMSP| 1919 * 1920 * x 24 MAC (truncated) 1921 * W 3 Send Window Scale index 1922 * M 3 MSS index 1923 * S 1 SACK permitted 1924 * P 1 Odd/even secret 1925 */ 1926
1927 /* 1928 * Distribution and probability of certain MSS values. Those in between are 1929 * rounded down to the next lower one. 1930 * [An Analysis of TCP Maximum Segment Sizes, S. Alcock and R.
Nelson, 2011] 1931 * .2% .3% 5% 7% 7% 20% 15% 45% 1932 */ 1933 static int tcp_sc_msstab[] = { 216, 536, 1200, 1360, 1400, 1440, 1452, 1460 }; 1934
1935 /* 1936 * Distribution and probability of certain WSCALE values. We have to map the 1937 * (send) window scale (shift) option with a range of 0-14 from 4 bits into 3 1938 * bits based on prevalence of certain values. Values we don't have an exact 1939 * match for are rounded down to the next lower one letting us under-estimate 1940 * the true available window. At the moment this would happen only for the 1941 * very uncommon values 3, 5 and those above 8 (more than 16MB socket buffer 1942 * and window size). The absence of the WSCALE option (no scaling in either 1943 * direction) is encoded with index zero. 1944 * [WSCALE values histograms, Allman, 2012] 1945 * X 10 10 35 5 6 14 10% by host 1946 * X 11 4 5 5 18 49 3% by connections 1947 */ 1948 static int tcp_sc_wstab[] = { 0, 0, 1, 2, 4, 6, 7, 8 }; 1949
1950 /* 1951 * Compute the MAC for the SYN cookie. SIPHASH-2-4 is chosen for its speed 1952 * and good cryptographic properties. 1953 */ 1954 static uint32_t 1955 syncookie_mac(struct in_conninfo *inc, tcp_seq irs, uint8_t flags, 1956 uint8_t *secbits, uintptr_t secmod) 1957 { 1958 SIPHASH_CTX ctx; 1959 uint32_t siphash[2]; 1960 1961 SipHash24_Init(&ctx); 1962 SipHash_SetKey(&ctx, secbits); 1963 switch (inc->inc_flags & INC_ISIPV6) { 1964 #ifdef INET 1965 case 0: 1966 SipHash_Update(&ctx, &inc->inc_faddr, sizeof(inc->inc_faddr)); 1967 SipHash_Update(&ctx, &inc->inc_laddr, sizeof(inc->inc_laddr)); 1968 break; 1969 #endif 1970 #ifdef INET6 1971 case INC_ISIPV6: 1972 SipHash_Update(&ctx, &inc->inc6_faddr, sizeof(inc->inc6_faddr)); 1973 SipHash_Update(&ctx, &inc->inc6_laddr, sizeof(inc->inc6_laddr)); 1974 break; 1975 #endif 1976 } 1977 SipHash_Update(&ctx, &inc->inc_fport, sizeof(inc->inc_fport)); 1978 SipHash_Update(&ctx, &inc->inc_lport, sizeof(inc->inc_lport)); 1979 SipHash_Update(&ctx, &irs, sizeof(irs)); 1980 SipHash_Update(&ctx, &flags, sizeof(flags)); 1981 SipHash_Update(&ctx, &secmod, sizeof(secmod)); 1982 SipHash_Final((u_int8_t *)&siphash, &ctx); 1983 1984 return (siphash[0] ^ siphash[1]); 1985 } 1986
1987 static tcp_seq 1988 syncookie_generate(struct syncache_head *sch, struct syncache *sc) 1989 { 1990 u_int i, secbit, wscale; 1991 uint32_t iss, hash; 1992 uint8_t *secbits; 1993 union syncookie cookie; 1994 1995 SCH_LOCK_ASSERT(sch); 1996 1997 cookie.cookie = 0; 1998 1999 /* Map our computed MSS into the 3-bit index. */ 2000 for (i = nitems(tcp_sc_msstab) - 1; 2001 tcp_sc_msstab[i] > sc->sc_peer_mss && i > 0; 2002 i--) 2003 ; 2004 cookie.flags.mss_idx = i; 2005
2006 /* 2007 * Map the send window scale into the 3-bit index but only if 2008 * the wscale option was received. 2009 */ 2010 if (sc->sc_flags & SCF_WINSCALE) { 2011 wscale = sc->sc_requested_s_scale; 2012 for (i = nitems(tcp_sc_wstab) - 1; 2013 tcp_sc_wstab[i] > wscale && i > 0; 2014 i--) 2015 ; 2016 cookie.flags.wscale_idx = i; 2017 } 2018 2019 /* Can we do SACK? */ 2020 if (sc->sc_flags & SCF_SACK) 2021 cookie.flags.sack_ok = 1; 2022 2023 /* Which of the two secrets to use. */ 2024 secbit = sch->sch_sc->secret.oddeven & 0x1; 2025 cookie.flags.odd_even = secbit; 2026 2027 secbits = sch->sch_sc->secret.key[secbit]; 2028 hash = syncookie_mac(&sc->sc_inc, sc->sc_irs, cookie.cookie, secbits, 2029 (uintptr_t)sch); 2030 2031 /* 2032 * Put the flags into the hash and XOR them to get better ISS number 2033 * variance.
This doesn't enhance the cryptographic strength and is 2034 * done to prevent the 8 cookie bits from showing up directly on the 2035 * wire. 2036 */ 2037 iss = hash & ~0xff; 2038 iss |= cookie.cookie ^ (hash >> 24); 2039 2040 /* Randomize the timestamp. */ 2041 if (sc->sc_flags & SCF_TIMESTAMP) { 2042 sc->sc_ts = arc4random(); 2043 sc->sc_tsoff = sc->sc_ts - tcp_ts_getticks(); 2044 } 2045 2046 TCPSTAT_INC(tcps_sc_sendcookie); 2047 return (iss); 2048 } 2049
2050 static struct syncache * 2051 syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch, 2052 struct syncache *sc, struct tcphdr *th, struct tcpopt *to, 2053 struct socket *lso) 2054 { 2055 uint32_t hash; 2056 uint8_t *secbits; 2057 tcp_seq ack, seq; 2058 int wnd, wscale = 0; 2059 union syncookie cookie; 2060 2061 SCH_LOCK_ASSERT(sch); 2062
2063 /* 2064 * Pull information out of SYN-ACK/ACK and revert sequence number 2065 * advances. 2066 */ 2067 ack = th->th_ack - 1; 2068 seq = th->th_seq - 1; 2069 2070 /* 2071 * Unpack the flags containing enough information to restore the 2072 * connection. 2073 */ 2074 cookie.cookie = (ack & 0xff) ^ (ack >> 24); 2075 2076 /* Which of the two secrets to use. */ 2077 secbits = sch->sch_sc->secret.key[cookie.flags.odd_even]; 2078 2079 hash = syncookie_mac(inc, seq, cookie.cookie, secbits, (uintptr_t)sch); 2080 2081 /* The recomputed hash matches the ACK if this was a genuine cookie. */ 2082 if ((ack & ~0xff) != (hash & ~0xff)) 2083 return (NULL); 2084
2085 /* Fill in the syncache values. */ 2086 sc->sc_flags = 0; 2087 bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo)); 2088 sc->sc_ipopts = NULL; 2089 2090 sc->sc_irs = seq; 2091 sc->sc_iss = ack; 2092 2093 switch (inc->inc_flags & INC_ISIPV6) { 2094 #ifdef INET 2095 case 0: 2096 sc->sc_ip_ttl = sotoinpcb(lso)->inp_ip_ttl; 2097 sc->sc_ip_tos = sotoinpcb(lso)->inp_ip_tos; 2098 break; 2099 #endif 2100 #ifdef INET6 2101 case INC_ISIPV6: 2102 if (sotoinpcb(lso)->inp_flags & IN6P_AUTOFLOWLABEL) 2103 sc->sc_flowlabel = sc->sc_iss & IPV6_FLOWLABEL_MASK; 2104 break; 2105 #endif 2106 } 2107
2108 sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx]; 2109 2110 /* We can simply recompute the receive window scale we sent earlier. */ 2111 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max) 2112 wscale++; 2113 2114 /* Only use wscale if it was enabled in the original SYN.
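* For example (an assumed value, purely for illustration): a cookie carrying
* wscale_idx == 5 decodes through tcp_sc_wstab[] below to a peer shift of 6,
* whereas wscale_idx == 0 means the option was never offered and SCF_WINSCALE
* is left clear.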
*/ 2115 if (cookie.flags.wscale_idx > 0) { 2116 sc->sc_requested_r_scale = wscale; 2117 sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx]; 2118 sc->sc_flags |= SCF_WINSCALE; 2119 } 2120 2121 wnd = lso->sol_sbrcv_hiwat; 2122 wnd = imax(wnd, 0); 2123 wnd = imin(wnd, TCP_MAXWIN); 2124 sc->sc_wnd = wnd; 2125 2126 if (cookie.flags.sack_ok) 2127 sc->sc_flags |= SCF_SACK; 2128 2129 if (to->to_flags & TOF_TS) { 2130 sc->sc_flags |= SCF_TIMESTAMP; 2131 sc->sc_tsreflect = to->to_tsval; 2132 sc->sc_ts = to->to_tsecr; 2133 sc->sc_tsoff = to->to_tsecr - tcp_ts_getticks(); 2134 } 2135 2136 if (to->to_flags & TOF_SIGNATURE) 2137 sc->sc_flags |= SCF_SIGNATURE; 2138 2139 sc->sc_rxmits = 0; 2140 2141 TCPSTAT_INC(tcps_sc_recvcookie); 2142 return (sc); 2143 } 2144 2145 #ifdef INVARIANTS 2146 static int 2147 syncookie_cmp(struct in_conninfo *inc, struct syncache_head *sch, 2148 struct syncache *sc, struct tcphdr *th, struct tcpopt *to, 2149 struct socket *lso) 2150 { 2151 struct syncache scs, *scx; 2152 char *s; 2153 2154 bzero(&scs, sizeof(scs)); 2155 scx = syncookie_lookup(inc, sch, &scs, th, to, lso); 2156 2157 if ((s = tcp_log_addrs(inc, th, NULL, NULL)) == NULL) 2158 return (0); 2159 2160 if (scx != NULL) { 2161 if (sc->sc_peer_mss != scx->sc_peer_mss) 2162 log(LOG_DEBUG, "%s; %s: mss different %i vs %i\n", 2163 s, __func__, sc->sc_peer_mss, scx->sc_peer_mss); 2164 2165 if (sc->sc_requested_r_scale != scx->sc_requested_r_scale) 2166 log(LOG_DEBUG, "%s; %s: rwscale different %i vs %i\n", 2167 s, __func__, sc->sc_requested_r_scale, 2168 scx->sc_requested_r_scale); 2169 2170 if (sc->sc_requested_s_scale != scx->sc_requested_s_scale) 2171 log(LOG_DEBUG, "%s; %s: swscale different %i vs %i\n", 2172 s, __func__, sc->sc_requested_s_scale, 2173 scx->sc_requested_s_scale); 2174 2175 if ((sc->sc_flags & SCF_SACK) != (scx->sc_flags & SCF_SACK)) 2176 log(LOG_DEBUG, "%s; %s: SACK different\n", s, __func__); 2177 } 2178 2179 if (s != NULL) 2180 free(s, M_TCPLOG); 2181 return (0); 2182 } 2183 #endif /* INVARIANTS */ 2184 2185 static void 2186 syncookie_reseed(void *arg) 2187 { 2188 struct tcp_syncache *sc = arg; 2189 uint8_t *secbits; 2190 int secbit; 2191 2192 /* 2193 * Reseeding the secret doesn't have to be protected by a lock. 2194 * It only must be ensured that the new random values are visible 2195 * to all CPUs in a SMP environment. The atomic with release 2196 * semantics ensures that. 2197 */ 2198 secbit = (sc->secret.oddeven & 0x1) ? 0 : 1; 2199 secbits = sc->secret.key[secbit]; 2200 arc4rand(secbits, SYNCOOKIE_SECRET_SIZE, 0); 2201 atomic_add_rel_int(&sc->secret.oddeven, 1); 2202 2203 /* Reschedule ourself. */ 2204 callout_schedule(&sc->secret.reseed, SYNCOOKIE_LIFETIME * hz); 2205 } 2206 2207 /* 2208 * Exports the syncache entries to userland so that netstat can display 2209 * them alongside the other sockets. This function is intended to be 2210 * called only from tcp_pcblist. 2211 * 2212 * Due to concurrency on an active system, the number of pcbs exported 2213 * may have no relation to max_pcbs. max_pcbs merely indicates the 2214 * amount of space the caller allocated for this function to use. 
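*
* A minimal sketch of the intended calling pattern (buf_space is a
* hypothetical name used only for this illustration; in the tree the
* caller is tcp_pcblist()):
*
*	int exported, error;
*
*	error = syncache_pcblist(req, buf_space / sizeof(struct xtcpcb),
*	    &exported);
*
* where buf_space is the number of bytes set aside for SYSCTL_OUT and
* exported reports how many entries were actually copied out.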
2215 */ 2216 int 2217 syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported) 2218 { 2219 struct xtcpcb xt; 2220 struct syncache *sc; 2221 struct syncache_head *sch; 2222 int count, error, i; 2223
2224 for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) { 2225 sch = &V_tcp_syncache.hashbase[i]; 2226 SCH_LOCK(sch); 2227 TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) { 2228 if (count >= max_pcbs) { 2229 SCH_UNLOCK(sch); 2230 goto exit; 2231 } 2232 if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0) 2233 continue; 2234 bzero(&xt, sizeof(xt)); 2235 xt.xt_len = sizeof(xt); 2236 if (sc->sc_inc.inc_flags & INC_ISIPV6) 2237 xt.xt_inp.inp_vflag = INP_IPV6; 2238 else 2239 xt.xt_inp.inp_vflag = INP_IPV4; 2240 bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc, 2241 sizeof (struct in_conninfo)); 2242 xt.t_state = TCPS_SYN_RECEIVED; 2243 xt.xt_inp.xi_socket.xso_protocol = IPPROTO_TCP; 2244 xt.xt_inp.xi_socket.xso_len = sizeof (struct xsocket); 2245 xt.xt_inp.xi_socket.so_type = SOCK_STREAM; 2246 xt.xt_inp.xi_socket.so_state = SS_ISCONNECTING; 2247 error = SYSCTL_OUT(req, &xt, sizeof xt); 2248 if (error) { 2249 SCH_UNLOCK(sch); 2250 goto exit; 2251 } 2252 count++; 2253 } 2254 SCH_UNLOCK(sch); 2255 } 2256 exit: 2257 *pcbs_exported = count; 2258 return (error); 2259 } 2260
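/*
 * Illustrative sketch only -- not part of the kernel build and not calling
 * any of the functions above.  It reproduces, with made-up values, the bit
 * packing documented in the schematic further up: the top 24 bits of the
 * ISS carry the truncated MAC and the low 8 bits carry the cookie flags
 * XOR'd with the MAC's top byte, which is how syncookie_generate() emits
 * the ISS and how syncookie_lookup() recovers the flags from th_ack - 1.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t hash = 0xa1b2c3d4;	/* stand-in for the SipHash-2-4 output */
	uint8_t flags = 0x5b;		/* 8 cookie bits: WWWMMMSP */
	uint32_t iss;
	uint8_t recovered;

	/* Encode: keep the MAC's top 24 bits, fold the flags into the low byte. */
	iss = hash & ~0xffU;
	iss |= (uint32_t)(flags ^ (uint8_t)(hash >> 24));

	/* Decode: the peer's ACK minus one gives the ISS back. */
	recovered = (uint8_t)((iss & 0xff) ^ (iss >> 24));
	assert(recovered == flags);

	return (0);
}
#endif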