/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/stats.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

VNET_DEFINE(int, tcp_log_in_vain) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_log_in_vain), 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_newcwv) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_newcwv), 0,
    "Enable New Congestion Window Validation per RFC7661");

VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc6675_pipe), 0,
    "Use calculated pipe/in-flight bytes per RFC 6675");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);
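
/*
 * The knobs above are exposed under the net.inet.tcp sysctl tree (per-VNET
 * where CTLFLAG_VNET is set).  For illustration only, an administrator could
 * drop all segments to closed ports silently and keep the default ten-segment
 * initial window with:
 *
 *	sysctl net.inet.tcp.blackhole=2
 *	sysctl net.inet.tcp.initcwnd_segments=10
 */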

/*
 * TCP statistics are stored in an array of counter(9)s, whose size matches
 * the size of struct tcpstat.  The TCP running connection count is a
 * regular array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

static void
tcp_vnet_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_init, NULL);

#ifdef VIMAGE
static void
tcp_vnet_uninit(const void *unused)
{

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);
}
VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_uninit, NULL);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
    uint16_t type)
{
#ifdef STATS
	int32_t gput;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->nsegs = nsegs;
	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) ||
	    (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) &&
	     (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2))))
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
#ifdef STATS
		stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
		    ((int32_t)tp->snd_cwnd) - tp->snd_wnd);
		if (!IN_RECOVERY(tp->t_flags))
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN,
			    tp->ccv->bytes_this_ack / (tcp_maxseg(tp) * nsegs));
		if ((tp->t_flags & TF_GPUTINPROG) &&
		    SEQ_GEQ(th->th_ack, tp->gput_ack)) {
			/*
			 * Compute goodput in bits per millisecond.
			 */
			gput = (((int64_t)(th->th_ack - tp->gput_seq)) << 3) /
			    max(1, tcp_ts_getticks() - tp->gput_ts);
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
			    gput);
			/*
			 * XXXLAS: This is a temporary hack, and should be
			 * chained off VOI_TCP_GPUT when stats(9) grows an API
			 * to deal with chained VOIs.
			 */
			if (tp->t_stats_gput_prev > 0)
				stats_voi_update_abs_s32(tp->t_stats,
				    VOI_TCP_GPUT_ND,
				    ((gput - tp->t_stats_gput_prev) * 100) /
				    tp->t_stats_gput_prev);
			tp->t_flags &= ~TF_GPUTINPROG;
			tp->t_stats_gput_prev = gput;
		}
#endif /* STATS */
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    nsegs * V_tcp_abc_l_var * tcp_maxseg(tp));
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
#endif
}
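
/*
 * Worked example for the accounting above (illustrative numbers only):
 * an ACK that advances th_ack 64000 bytes past gput_seq and arrives 40 ms
 * after gput_ts yields a goodput of (64000 << 3) / 40 = 12800 bits/ms,
 * i.e. roughly 12.8 Mbit/s.  Likewise, with V_tcp_abc_l_var = 2 and a
 * 1460 byte MSS, slow start credits at most nsegs * 2 * 1460 bytes of
 * t_bytes_acked per call.
 */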

void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else
		tp->snd_cwnd = tcp_compute_initwnd(maxseg);

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	u_int maxseg;

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef STATS
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
#endif

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		maxseg = tcp_maxseg(tp);
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    maxseg) * maxseg;
		tp->snd_cwnd = maxseg;
		if (tp->t_flags2 & TF2_ECN_PERMIT)
			tp->t_flags2 |= TF2_ECN_SND_CWR;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}
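
/*
 * Worked example for the CC_RTO case above (illustrative numbers only):
 * with maxseg = 1460, snd_cwnd = 29200 (20 segments) and a larger snd_wnd,
 * ssthresh becomes max(2, 29200 / 2 / 1460) * 1460 = 14600 bytes
 * (10 segments) while snd_cwnd collapses to a single segment.
 */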

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxseg) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			/* FALLTHROUGH */
		case IPTOS_ECN_ECT1:
			/* FALLTHROUGH */
		case IPTOS_ECN_NOTECT:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW) {
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
			tp->t_flags |= TF_ACKNOW;
		}
	}
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	m = *mp;
	if (m->m_len < *offp + sizeof(struct tcphdr)) {
		m = m_pullup(m, *offp + sizeof(struct tcphdr));
		if (m == NULL) {
			*mp = m;
			TCPSTAT_INC(tcps_rcvshort);
			return (IPPROTO_DONE);
		}
	}

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {

		ifa_free(&ia6->ia_ifa);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	*mp = m;
	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
	uint8_t ipttl;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

	NET_EPOCH_ASSERT();

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ?
1 : 0; 645 #endif 646 647 off0 = *offp; 648 m = *mp; 649 *mp = NULL; 650 to.to_flags = 0; 651 TCPSTAT_INC(tcps_rcvtotal); 652 653 #ifdef INET6 654 if (isipv6) { 655 656 ip6 = mtod(m, struct ip6_hdr *); 657 th = (struct tcphdr *)((caddr_t)ip6 + off0); 658 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; 659 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) { 660 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 661 th->th_sum = m->m_pkthdr.csum_data; 662 else 663 th->th_sum = in6_cksum_pseudo(ip6, tlen, 664 IPPROTO_TCP, m->m_pkthdr.csum_data); 665 th->th_sum ^= 0xffff; 666 } else 667 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen); 668 if (th->th_sum) { 669 TCPSTAT_INC(tcps_rcvbadsum); 670 goto drop; 671 } 672 673 /* 674 * Be proactive about unspecified IPv6 address in source. 675 * As we use all-zero to indicate unbounded/unconnected pcb, 676 * unspecified IPv6 address can be used to confuse us. 677 * 678 * Note that packets with unspecified IPv6 destination is 679 * already dropped in ip6_input. 680 */ 681 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 682 /* XXX stat */ 683 goto drop; 684 } 685 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 686 } 687 #endif 688 #if defined(INET) && defined(INET6) 689 else 690 #endif 691 #ifdef INET 692 { 693 /* 694 * Get IP and TCP header together in first mbuf. 695 * Note: IP leaves IP header in first mbuf. 696 */ 697 if (off0 > sizeof (struct ip)) { 698 ip_stripoptions(m); 699 off0 = sizeof(struct ip); 700 } 701 if (m->m_len < sizeof (struct tcpiphdr)) { 702 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 703 == NULL) { 704 TCPSTAT_INC(tcps_rcvshort); 705 return (IPPROTO_DONE); 706 } 707 } 708 ip = mtod(m, struct ip *); 709 th = (struct tcphdr *)((caddr_t)ip + off0); 710 tlen = ntohs(ip->ip_len) - off0; 711 712 iptos = ip->ip_tos; 713 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 714 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 715 th->th_sum = m->m_pkthdr.csum_data; 716 else 717 th->th_sum = in_pseudo(ip->ip_src.s_addr, 718 ip->ip_dst.s_addr, 719 htonl(m->m_pkthdr.csum_data + tlen + 720 IPPROTO_TCP)); 721 th->th_sum ^= 0xffff; 722 } else { 723 struct ipovly *ipov = (struct ipovly *)ip; 724 725 /* 726 * Checksum extended TCP header and data. 727 */ 728 len = off0 + tlen; 729 ipttl = ip->ip_ttl; 730 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 731 ipov->ih_len = htons(tlen); 732 th->th_sum = in_cksum(m, len); 733 /* Reset length for SDT probes. */ 734 ip->ip_len = htons(len); 735 /* Reset TOS bits */ 736 ip->ip_tos = iptos; 737 /* Re-initialization for later version check */ 738 ip->ip_ttl = ipttl; 739 ip->ip_v = IPVERSION; 740 ip->ip_hl = off0 >> 2; 741 } 742 743 if (th->th_sum) { 744 TCPSTAT_INC(tcps_rcvbadsum); 745 goto drop; 746 } 747 } 748 #endif /* INET */ 749 750 /* 751 * Check that TCP offset makes sense, 752 * pull out TCP options and adjust length. 
XXX 753 */ 754 off = th->th_off << 2; 755 if (off < sizeof (struct tcphdr) || off > tlen) { 756 TCPSTAT_INC(tcps_rcvbadoff); 757 goto drop; 758 } 759 tlen -= off; /* tlen is used instead of ti->ti_len */ 760 if (off > sizeof (struct tcphdr)) { 761 #ifdef INET6 762 if (isipv6) { 763 if (m->m_len < off0 + off) { 764 m = m_pullup(m, off0 + off); 765 if (m == NULL) { 766 TCPSTAT_INC(tcps_rcvshort); 767 return (IPPROTO_DONE); 768 } 769 } 770 ip6 = mtod(m, struct ip6_hdr *); 771 th = (struct tcphdr *)((caddr_t)ip6 + off0); 772 } 773 #endif 774 #if defined(INET) && defined(INET6) 775 else 776 #endif 777 #ifdef INET 778 { 779 if (m->m_len < sizeof(struct ip) + off) { 780 if ((m = m_pullup(m, sizeof (struct ip) + off)) 781 == NULL) { 782 TCPSTAT_INC(tcps_rcvshort); 783 return (IPPROTO_DONE); 784 } 785 ip = mtod(m, struct ip *); 786 th = (struct tcphdr *)((caddr_t)ip + off0); 787 } 788 } 789 #endif 790 optlen = off - sizeof (struct tcphdr); 791 optp = (u_char *)(th + 1); 792 } 793 thflags = th->th_flags; 794 795 /* 796 * Convert TCP protocol specific fields to host format. 797 */ 798 tcp_fields_to_host(th); 799 800 /* 801 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 802 */ 803 drop_hdrlen = off0 + off; 804 805 /* 806 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 807 */ 808 if ( 809 #ifdef INET6 810 (isipv6 && (m->m_flags & M_IP6_NEXTHOP)) 811 #ifdef INET 812 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP)) 813 #endif 814 #endif 815 #if defined(INET) && !defined(INET6) 816 (m->m_flags & M_IP_NEXTHOP) 817 #endif 818 ) 819 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 820 821 findpcb: 822 #ifdef INET6 823 if (isipv6 && fwd_tag != NULL) { 824 struct sockaddr_in6 *next_hop6; 825 826 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1); 827 /* 828 * Transparently forwarded. Pretend to be the destination. 829 * Already got one like this? 830 */ 831 inp = in6_pcblookup_mbuf(&V_tcbinfo, 832 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport, 833 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m); 834 if (!inp) { 835 /* 836 * It's new. Try to find the ambushing socket. 837 * Because we've rewritten the destination address, 838 * any hardware-generated hash is ignored. 839 */ 840 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src, 841 th->th_sport, &next_hop6->sin6_addr, 842 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) : 843 th->th_dport, INPLOOKUP_WILDCARD | 844 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 845 } 846 } else if (isipv6) { 847 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src, 848 th->th_sport, &ip6->ip6_dst, th->th_dport, 849 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 850 m->m_pkthdr.rcvif, m); 851 } 852 #endif /* INET6 */ 853 #if defined(INET6) && defined(INET) 854 else 855 #endif 856 #ifdef INET 857 if (fwd_tag != NULL) { 858 struct sockaddr_in *next_hop; 859 860 next_hop = (struct sockaddr_in *)(fwd_tag+1); 861 /* 862 * Transparently forwarded. Pretend to be the destination. 863 * already got one like this? 864 */ 865 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport, 866 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB, 867 m->m_pkthdr.rcvif, m); 868 if (!inp) { 869 /* 870 * It's new. Try to find the ambushing socket. 871 * Because we've rewritten the destination address, 872 * any hardware-generated hash is ignored. 873 */ 874 inp = in_pcblookup(&V_tcbinfo, ip->ip_src, 875 th->th_sport, next_hop->sin_addr, 876 next_hop->sin_port ? 
ntohs(next_hop->sin_port) : 877 th->th_dport, INPLOOKUP_WILDCARD | 878 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 879 } 880 } else 881 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, 882 th->th_sport, ip->ip_dst, th->th_dport, 883 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 884 m->m_pkthdr.rcvif, m); 885 #endif /* INET */ 886 887 /* 888 * If the INPCB does not exist then all data in the incoming 889 * segment is discarded and an appropriate RST is sent back. 890 * XXX MRT Send RST using which routing table? 891 */ 892 if (inp == NULL) { 893 /* 894 * Log communication attempts to ports that are not 895 * in use. 896 */ 897 if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 898 V_tcp_log_in_vain == 2) { 899 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6))) 900 log(LOG_INFO, "%s; %s: Connection attempt " 901 "to closed port\n", s, __func__); 902 } 903 /* 904 * When blackholing do not respond with a RST but 905 * completely ignore the segment and drop it. 906 */ 907 if ((V_blackhole == 1 && (thflags & TH_SYN)) || 908 V_blackhole == 2) 909 goto dropunlock; 910 911 rstreason = BANDLIM_RST_CLOSEDPORT; 912 goto dropwithreset; 913 } 914 INP_WLOCK_ASSERT(inp); 915 /* 916 * While waiting for inp lock during the lookup, another thread 917 * can have dropped the inpcb, in which case we need to loop back 918 * and try to find a new inpcb to deliver to. 919 */ 920 if (inp->inp_flags & INP_DROPPED) { 921 INP_WUNLOCK(inp); 922 inp = NULL; 923 goto findpcb; 924 } 925 if ((inp->inp_flowtype == M_HASHTYPE_NONE) && 926 (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) && 927 ((inp->inp_socket == NULL) || 928 (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) { 929 inp->inp_flowid = m->m_pkthdr.flowid; 930 inp->inp_flowtype = M_HASHTYPE_GET(m); 931 } 932 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 933 #ifdef INET6 934 if (isipv6 && IPSEC_ENABLED(ipv6) && 935 IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) { 936 goto dropunlock; 937 } 938 #ifdef INET 939 else 940 #endif 941 #endif /* INET6 */ 942 #ifdef INET 943 if (IPSEC_ENABLED(ipv4) && 944 IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) { 945 goto dropunlock; 946 } 947 #endif /* INET */ 948 #endif /* IPSEC */ 949 950 /* 951 * Check the minimum TTL for socket. 952 */ 953 if (inp->inp_ip_minttl != 0) { 954 #ifdef INET6 955 if (isipv6) { 956 if (inp->inp_ip_minttl > ip6->ip6_hlim) 957 goto dropunlock; 958 } else 959 #endif 960 if (inp->inp_ip_minttl > ip->ip_ttl) 961 goto dropunlock; 962 } 963 964 /* 965 * A previous connection in TIMEWAIT state is supposed to catch stray 966 * or duplicate segments arriving late. If this segment was a 967 * legitimate new connection attempt, the old INPCB gets removed and 968 * we can try again to find a listening socket. 969 * 970 * At this point, due to earlier optimism, we may hold only an inpcb 971 * lock, and not the inpcbinfo write lock. If so, we need to try to 972 * acquire it, or if that fails, acquire a reference on the inpcb, 973 * drop all locks, acquire a global write lock, and then re-acquire 974 * the inpcb lock. We may at that point discover that another thread 975 * has tried to free the inpcb, in which case we need to loop back 976 * and try to find a new inpcb to deliver to. 977 * 978 * XXXRW: It may be time to rethink timewait locking. 979 */ 980 if (inp->inp_flags & INP_TIMEWAIT) { 981 if (thflags & TH_SYN) 982 tcp_dooptions(&to, optp, optlen, TO_SYN); 983 /* 984 * NB: tcp_twcheck unlocks the INP and frees the mbuf. 
985 */ 986 if (tcp_twcheck(inp, &to, th, m, tlen)) 987 goto findpcb; 988 return (IPPROTO_DONE); 989 } 990 /* 991 * The TCPCB may no longer exist if the connection is winding 992 * down or it is in the CLOSED state. Either way we drop the 993 * segment and send an appropriate response. 994 */ 995 tp = intotcpcb(inp); 996 if (tp == NULL || tp->t_state == TCPS_CLOSED) { 997 rstreason = BANDLIM_RST_CLOSEDPORT; 998 goto dropwithreset; 999 } 1000 1001 #ifdef TCP_OFFLOAD 1002 if (tp->t_flags & TF_TOE) { 1003 tcp_offload_input(tp, m); 1004 m = NULL; /* consumed by the TOE driver */ 1005 goto dropunlock; 1006 } 1007 #endif 1008 1009 #ifdef MAC 1010 INP_WLOCK_ASSERT(inp); 1011 if (mac_inpcb_check_deliver(inp, m)) 1012 goto dropunlock; 1013 #endif 1014 so = inp->inp_socket; 1015 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 1016 #ifdef TCPDEBUG 1017 if (so->so_options & SO_DEBUG) { 1018 ostate = tp->t_state; 1019 #ifdef INET6 1020 if (isipv6) { 1021 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 1022 } else 1023 #endif 1024 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 1025 tcp_savetcp = *th; 1026 } 1027 #endif /* TCPDEBUG */ 1028 /* 1029 * When the socket is accepting connections (the INPCB is in LISTEN 1030 * state) we look into the SYN cache if this is a new connection 1031 * attempt or the completion of a previous one. 1032 */ 1033 KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN), 1034 ("%s: so accepting but tp %p not listening", __func__, tp)); 1035 if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) { 1036 struct in_conninfo inc; 1037 1038 bzero(&inc, sizeof(inc)); 1039 #ifdef INET6 1040 if (isipv6) { 1041 inc.inc_flags |= INC_ISIPV6; 1042 if (inp->inp_inc.inc_flags & INC_IPV6MINMTU) 1043 inc.inc_flags |= INC_IPV6MINMTU; 1044 inc.inc6_faddr = ip6->ip6_src; 1045 inc.inc6_laddr = ip6->ip6_dst; 1046 } else 1047 #endif 1048 { 1049 inc.inc_faddr = ip->ip_src; 1050 inc.inc_laddr = ip->ip_dst; 1051 } 1052 inc.inc_fport = th->th_sport; 1053 inc.inc_lport = th->th_dport; 1054 inc.inc_fibnum = so->so_fibnum; 1055 1056 /* 1057 * Check for an existing connection attempt in syncache if 1058 * the flag is only ACK. A successful lookup creates a new 1059 * socket appended to the listen queue in SYN_RECEIVED state. 1060 */ 1061 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 1062 1063 /* 1064 * Parse the TCP options here because 1065 * syncookies need access to the reflected 1066 * timestamp. 1067 */ 1068 tcp_dooptions(&to, optp, optlen, 0); 1069 /* 1070 * NB: syncache_expand() doesn't unlock 1071 * inp and tcpinfo locks. 1072 */ 1073 rstreason = syncache_expand(&inc, &to, th, &so, m); 1074 if (rstreason < 0) { 1075 /* 1076 * A failing TCP MD5 signature comparison 1077 * must result in the segment being dropped 1078 * and must not produce any response back 1079 * to the sender. 1080 */ 1081 goto dropunlock; 1082 } else if (rstreason == 0) { 1083 /* 1084 * No syncache entry or ACK was not 1085 * for our SYN/ACK. Send a RST. 1086 * NB: syncache did its own logging 1087 * of the failure cause. 1088 */ 1089 rstreason = BANDLIM_RST_OPENPORT; 1090 goto dropwithreset; 1091 } 1092 tfo_socket_result: 1093 if (so == NULL) { 1094 /* 1095 * We completed the 3-way handshake 1096 * but could not allocate a socket 1097 * either due to memory shortage, 1098 * listen queue length limits or 1099 * global socket limits. Send RST 1100 * or wait and have the remote end 1101 * retransmit the ACK for another 1102 * try. 
1103 */ 1104 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1105 log(LOG_DEBUG, "%s; %s: Listen socket: " 1106 "Socket allocation failed due to " 1107 "limits or memory shortage, %s\n", 1108 s, __func__, 1109 V_tcp_sc_rst_sock_fail ? 1110 "sending RST" : "try again"); 1111 if (V_tcp_sc_rst_sock_fail) { 1112 rstreason = BANDLIM_UNLIMITED; 1113 goto dropwithreset; 1114 } else 1115 goto dropunlock; 1116 } 1117 /* 1118 * Socket is created in state SYN_RECEIVED. 1119 * Unlock the listen socket, lock the newly 1120 * created socket and update the tp variable. 1121 */ 1122 INP_WUNLOCK(inp); /* listen socket */ 1123 inp = sotoinpcb(so); 1124 /* 1125 * New connection inpcb is already locked by 1126 * syncache_expand(). 1127 */ 1128 INP_WLOCK_ASSERT(inp); 1129 tp = intotcpcb(inp); 1130 KASSERT(tp->t_state == TCPS_SYN_RECEIVED, 1131 ("%s: ", __func__)); 1132 /* 1133 * Process the segment and the data it 1134 * contains. tcp_do_segment() consumes 1135 * the mbuf chain and unlocks the inpcb. 1136 */ 1137 TCP_PROBE5(receive, NULL, tp, m, tp, th); 1138 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, 1139 iptos); 1140 return (IPPROTO_DONE); 1141 } 1142 /* 1143 * Segment flag validation for new connection attempts: 1144 * 1145 * Our (SYN|ACK) response was rejected. 1146 * Check with syncache and remove entry to prevent 1147 * retransmits. 1148 * 1149 * NB: syncache_chkrst does its own logging of failure 1150 * causes. 1151 */ 1152 if (thflags & TH_RST) { 1153 syncache_chkrst(&inc, th, m); 1154 goto dropunlock; 1155 } 1156 /* 1157 * We can't do anything without SYN. 1158 */ 1159 if ((thflags & TH_SYN) == 0) { 1160 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1161 log(LOG_DEBUG, "%s; %s: Listen socket: " 1162 "SYN is missing, segment ignored\n", 1163 s, __func__); 1164 TCPSTAT_INC(tcps_badsyn); 1165 goto dropunlock; 1166 } 1167 /* 1168 * (SYN|ACK) is bogus on a listen socket. 1169 */ 1170 if (thflags & TH_ACK) { 1171 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1172 log(LOG_DEBUG, "%s; %s: Listen socket: " 1173 "SYN|ACK invalid, segment rejected\n", 1174 s, __func__); 1175 syncache_badack(&inc); /* XXX: Not needed! */ 1176 TCPSTAT_INC(tcps_badsyn); 1177 rstreason = BANDLIM_RST_OPENPORT; 1178 goto dropwithreset; 1179 } 1180 /* 1181 * If the drop_synfin option is enabled, drop all 1182 * segments with both the SYN and FIN bits set. 1183 * This prevents e.g. nmap from identifying the 1184 * TCP/IP stack. 1185 * XXX: Poor reasoning. nmap has other methods 1186 * and is constantly refining its stack detection 1187 * strategies. 1188 * XXX: This is a violation of the TCP specification 1189 * and was used by RFC1644. 1190 */ 1191 if ((thflags & TH_FIN) && V_drop_synfin) { 1192 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1193 log(LOG_DEBUG, "%s; %s: Listen socket: " 1194 "SYN|FIN segment ignored (based on " 1195 "sysctl setting)\n", s, __func__); 1196 TCPSTAT_INC(tcps_badsyn); 1197 goto dropunlock; 1198 } 1199 /* 1200 * Segment's flags are (SYN) or (SYN|FIN). 1201 * 1202 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored 1203 * as they do not affect the state of the TCP FSM. 1204 * The data pointed to by TH_URG and th_urp is ignored. 
1205 */ 1206 KASSERT((thflags & (TH_RST|TH_ACK)) == 0, 1207 ("%s: Listen socket: TH_RST or TH_ACK set", __func__)); 1208 KASSERT(thflags & (TH_SYN), 1209 ("%s: Listen socket: TH_SYN not set", __func__)); 1210 #ifdef INET6 1211 /* 1212 * If deprecated address is forbidden, 1213 * we do not accept SYN to deprecated interface 1214 * address to prevent any new inbound connection from 1215 * getting established. 1216 * When we do not accept SYN, we send a TCP RST, 1217 * with deprecated source address (instead of dropping 1218 * it). We compromise it as it is much better for peer 1219 * to send a RST, and RST will be the final packet 1220 * for the exchange. 1221 * 1222 * If we do not forbid deprecated addresses, we accept 1223 * the SYN packet. RFC2462 does not suggest dropping 1224 * SYN in this case. 1225 * If we decipher RFC2462 5.5.4, it says like this: 1226 * 1. use of deprecated addr with existing 1227 * communication is okay - "SHOULD continue to be 1228 * used" 1229 * 2. use of it with new communication: 1230 * (2a) "SHOULD NOT be used if alternate address 1231 * with sufficient scope is available" 1232 * (2b) nothing mentioned otherwise. 1233 * Here we fall into (2b) case as we have no choice in 1234 * our source address selection - we must obey the peer. 1235 * 1236 * The wording in RFC2462 is confusing, and there are 1237 * multiple description text for deprecated address 1238 * handling - worse, they are not exactly the same. 1239 * I believe 5.5.4 is the best one, so we follow 5.5.4. 1240 */ 1241 if (isipv6 && !V_ip6_use_deprecated) { 1242 struct in6_ifaddr *ia6; 1243 1244 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */); 1245 if (ia6 != NULL && 1246 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 1247 ifa_free(&ia6->ia_ifa); 1248 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1249 log(LOG_DEBUG, "%s; %s: Listen socket: " 1250 "Connection attempt to deprecated " 1251 "IPv6 address rejected\n", 1252 s, __func__); 1253 rstreason = BANDLIM_RST_OPENPORT; 1254 goto dropwithreset; 1255 } 1256 if (ia6) 1257 ifa_free(&ia6->ia_ifa); 1258 } 1259 #endif /* INET6 */ 1260 /* 1261 * Basic sanity checks on incoming SYN requests: 1262 * Don't respond if the destination is a link layer 1263 * broadcast according to RFC1122 4.2.3.10, p. 104. 1264 * If it is from this socket it must be forged. 1265 * Don't respond if the source or destination is a 1266 * global or subnet broad- or multicast address. 1267 * Note that it is quite possible to receive unicast 1268 * link-layer packets with a broadcast IP address. Use 1269 * in_broadcast() to find them. 
1270 */ 1271 if (m->m_flags & (M_BCAST|M_MCAST)) { 1272 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1273 log(LOG_DEBUG, "%s; %s: Listen socket: " 1274 "Connection attempt from broad- or multicast " 1275 "link layer address ignored\n", s, __func__); 1276 goto dropunlock; 1277 } 1278 #ifdef INET6 1279 if (isipv6) { 1280 if (th->th_dport == th->th_sport && 1281 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { 1282 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1283 log(LOG_DEBUG, "%s; %s: Listen socket: " 1284 "Connection attempt to/from self " 1285 "ignored\n", s, __func__); 1286 goto dropunlock; 1287 } 1288 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1289 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { 1290 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1291 log(LOG_DEBUG, "%s; %s: Listen socket: " 1292 "Connection attempt from/to multicast " 1293 "address ignored\n", s, __func__); 1294 goto dropunlock; 1295 } 1296 } 1297 #endif 1298 #if defined(INET) && defined(INET6) 1299 else 1300 #endif 1301 #ifdef INET 1302 { 1303 if (th->th_dport == th->th_sport && 1304 ip->ip_dst.s_addr == ip->ip_src.s_addr) { 1305 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1306 log(LOG_DEBUG, "%s; %s: Listen socket: " 1307 "Connection attempt from/to self " 1308 "ignored\n", s, __func__); 1309 goto dropunlock; 1310 } 1311 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1312 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1313 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1314 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { 1315 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1316 log(LOG_DEBUG, "%s; %s: Listen socket: " 1317 "Connection attempt from/to broad- " 1318 "or multicast address ignored\n", 1319 s, __func__); 1320 goto dropunlock; 1321 } 1322 } 1323 #endif 1324 /* 1325 * SYN appears to be valid. Create compressed TCP state 1326 * for syncache. 1327 */ 1328 #ifdef TCPDEBUG 1329 if (so->so_options & SO_DEBUG) 1330 tcp_trace(TA_INPUT, ostate, tp, 1331 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1332 #endif 1333 TCP_PROBE3(debug__input, tp, th, m); 1334 tcp_dooptions(&to, optp, optlen, TO_SYN); 1335 if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL, iptos)) 1336 goto tfo_socket_result; 1337 1338 /* 1339 * Entry added to syncache and mbuf consumed. 1340 * Only the listen socket is unlocked by syncache_add(). 1341 */ 1342 INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo); 1343 return (IPPROTO_DONE); 1344 } else if (tp->t_state == TCPS_LISTEN) { 1345 /* 1346 * When a listen socket is torn down the SO_ACCEPTCONN 1347 * flag is removed first while connections are drained 1348 * from the accept queue in a unlock/lock cycle of the 1349 * ACCEPT_LOCK, opening a race condition allowing a SYN 1350 * attempt go through unhandled. 1351 */ 1352 goto dropunlock; 1353 } 1354 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 1355 if (tp->t_flags & TF_SIGNATURE) { 1356 tcp_dooptions(&to, optp, optlen, thflags); 1357 if ((to.to_flags & TOF_SIGNATURE) == 0) { 1358 TCPSTAT_INC(tcps_sig_err_nosigopt); 1359 goto dropunlock; 1360 } 1361 if (!TCPMD5_ENABLED() || 1362 TCPMD5_INPUT(m, th, to.to_signature) != 0) 1363 goto dropunlock; 1364 } 1365 #endif 1366 TCP_PROBE5(receive, NULL, tp, m, tp, th); 1367 1368 /* 1369 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later 1370 * state. tcp_do_segment() always consumes the mbuf chain, unlocks 1371 * the inpcb, and unlocks pcbinfo. 
 */
	tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * Automatic sizing of receive socket buffer.  Often the send
 * buffer size is not optimally adjusted to the actual network
 * conditions at hand (delay bandwidth product).  Setting the
 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (eg. trans-continental/oceanic links).
 *
 * On the receive side the socket buffer memory is only rarely
 * used to any significant extent.  This allows us to be much
 * more aggressive in scaling the receive socket buffer.  For
 * the case that the buffer space is actually used to a large
 * extent and we run out of kernel memory we can simply drop
 * the new segments; TCP on the sender will just retransmit them
 * later.  Setting the buffer size too big may only consume too
 * much kernel memory if the application doesn't read() from
 * the socket or packet loss or reordering makes use of the
 * reassembly queue.
 *
 * The criteria to step up the receive buffer one notch are:
 *  1. Application has not set receive buffer size with
 *     SO_RCVBUF.  Setting SO_RCVBUF clears SB_AUTOSIZE.
 *  2. The number of bytes received during 1/2 of an sRTT
 *     is at least 3/8 of the current socket buffer size.
 *  3. The receive buffer size has not hit its maximal automatic size.
 *
 * If all of the criteria are met we increase the socket buffer
 * by 1/2 (bounded by the max).  This allows us to keep ahead
 * of slow-start but also makes it so our peer never gets limited
 * by our rwnd which we then open up causing a burst.
 *
 * This algorithm does two steps per RTT at most and only if
 * we receive a bulk stream w/o packet losses or reorderings.
 * Shrinking the buffer during idle times is not necessary as
 * it doesn't consume any memory when idle.
 *
 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
	int newsize = 0;

	if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
	    tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
	    TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
	    ((tp->t_srtt >> TCP_RTT_SHIFT)/2)) {
		if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2)/ 4 * 3) &&
		    so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
			newsize = min((so->so_rcv.sb_hiwat + (so->so_rcv.sb_hiwat/2)), V_tcp_autorcvbuf_max);
		}
		TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

		/* Start over with next RTT. */
		tp->rfbuf_ts = 0;
		tp->rfbuf_cnt = 0;
	} else {
		tp->rfbuf_cnt += tlen;	/* add up */
	}
	return (newsize);
}
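
/*
 * Worked example (illustrative numbers only): with sb_hiwat = 65536, the
 * step-up above fires once more than (65536 / 2) / 4 * 3 = 24576 bytes
 * arrive within half a smoothed RTT, growing the buffer to
 * min(65536 + 32768, V_tcp_autorcvbuf_max) = 98304 bytes.
 */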

void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos)
{
	int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win;
	uint32_t tiwin;
	uint16_t nsegs;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;
	nsegs = max(1, m->m_pkthdr.lro_nsegs);

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
	    tlen, NULL, true);

	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: "
			    "SYN|FIN segment ignored (based on "
			    "sysctl setting)\n", s, __func__);
			free(s, M_TCPLOG);
		}
		goto drop;
	}

	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
#endif

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags2 & TF2_ECN_PERMIT) {
		if (thflags & TH_CWR) {
			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
			tp->t_flags |= TF_ACKNOW;
		}
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags2 |= TF2_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ?
TO_SYN : 0); 1582 1583 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 1584 if ((tp->t_flags & TF_SIGNATURE) != 0 && 1585 (to.to_flags & TOF_SIGNATURE) == 0) { 1586 TCPSTAT_INC(tcps_sig_err_sigopt); 1587 /* XXX: should drop? */ 1588 } 1589 #endif 1590 /* 1591 * If echoed timestamp is later than the current time, 1592 * fall back to non RFC1323 RTT calculation. Normalize 1593 * timestamp if syncookies were used when this connection 1594 * was established. 1595 */ 1596 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 1597 to.to_tsecr -= tp->ts_offset; 1598 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks())) 1599 to.to_tsecr = 0; 1600 else if (tp->t_flags & TF_PREVVALID && 1601 tp->t_badrxtwin != 0 && SEQ_LT(to.to_tsecr, tp->t_badrxtwin)) 1602 cc_cong_signal(tp, th, CC_RTO_ERR); 1603 } 1604 /* 1605 * Process options only when we get SYN/ACK back. The SYN case 1606 * for incoming connections is handled in tcp_syncache. 1607 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1608 * or <SYN,ACK>) segment itself is never scaled. 1609 * XXX this is traditional behavior, may need to be cleaned up. 1610 */ 1611 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1612 if ((to.to_flags & TOF_SCALE) && 1613 (tp->t_flags & TF_REQ_SCALE)) { 1614 tp->t_flags |= TF_RCVD_SCALE; 1615 tp->snd_scale = to.to_wscale; 1616 } 1617 /* 1618 * Initial send window. It will be updated with 1619 * the next incoming segment to the scaled value. 1620 */ 1621 tp->snd_wnd = th->th_win; 1622 if (to.to_flags & TOF_TS) { 1623 tp->t_flags |= TF_RCVD_TSTMP; 1624 tp->ts_recent = to.to_tsval; 1625 tp->ts_recent_age = tcp_ts_getticks(); 1626 } 1627 if (to.to_flags & TOF_MSS) 1628 tcp_mss(tp, to.to_mss); 1629 if ((tp->t_flags & TF_SACK_PERMIT) && 1630 (to.to_flags & TOF_SACKPERM) == 0) 1631 tp->t_flags &= ~TF_SACK_PERMIT; 1632 if (IS_FASTOPEN(tp->t_flags)) { 1633 if (to.to_flags & TOF_FASTOPEN) { 1634 uint16_t mss; 1635 1636 if (to.to_flags & TOF_MSS) 1637 mss = to.to_mss; 1638 else 1639 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 1640 mss = TCP6_MSS; 1641 else 1642 mss = TCP_MSS; 1643 tcp_fastopen_update_cache(tp, mss, 1644 to.to_tfo_len, to.to_tfo_cookie); 1645 } else 1646 tcp_fastopen_disable_path(tp); 1647 } 1648 } 1649 1650 /* 1651 * If timestamps were negotiated during SYN/ACK they should 1652 * appear on every segment during this session and vice versa. 1653 */ 1654 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) { 1655 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 1656 log(LOG_DEBUG, "%s; %s: Timestamp missing, " 1657 "no action\n", s, __func__); 1658 free(s, M_TCPLOG); 1659 } 1660 } 1661 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) { 1662 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 1663 log(LOG_DEBUG, "%s; %s: Timestamp not expected, " 1664 "no action\n", s, __func__); 1665 free(s, M_TCPLOG); 1666 } 1667 } 1668 1669 /* 1670 * Header prediction: check for the two common cases 1671 * of a uni-directional data xfer. If the packet has 1672 * no control flags, is in-sequence, the window didn't 1673 * change and we're not retransmitting, it's a 1674 * candidate. If the length is zero and the ack moved 1675 * forward, we're the sender side of the xfer. Just 1676 * free the data acked & wake any higher level process 1677 * that was blocked waiting for space. If the length 1678 * is non-zero and the ack didn't move, we're the 1679 * receiver side. 
If we're getting packets in-order 1680 * (the reassembly queue is empty), add the data to 1681 * the socket buffer and note that we need a delayed ack. 1682 * Make sure that the hidden state-flags are also off. 1683 * Since we check for TCPS_ESTABLISHED first, it can only 1684 * be TH_NEEDSYN. 1685 */ 1686 if (tp->t_state == TCPS_ESTABLISHED && 1687 th->th_seq == tp->rcv_nxt && 1688 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1689 tp->snd_nxt == tp->snd_max && 1690 tiwin && tiwin == tp->snd_wnd && 1691 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1692 SEGQ_EMPTY(tp) && 1693 ((to.to_flags & TOF_TS) == 0 || 1694 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1695 1696 /* 1697 * If last ACK falls within this segment's sequence numbers, 1698 * record the timestamp. 1699 * NOTE that the test is modified according to the latest 1700 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1701 */ 1702 if ((to.to_flags & TOF_TS) != 0 && 1703 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1704 tp->ts_recent_age = tcp_ts_getticks(); 1705 tp->ts_recent = to.to_tsval; 1706 } 1707 1708 if (tlen == 0) { 1709 if (SEQ_GT(th->th_ack, tp->snd_una) && 1710 SEQ_LEQ(th->th_ack, tp->snd_max) && 1711 !IN_RECOVERY(tp->t_flags) && 1712 (to.to_flags & TOF_SACK) == 0 && 1713 TAILQ_EMPTY(&tp->snd_holes)) { 1714 /* 1715 * This is a pure ack for outstanding data. 1716 */ 1717 TCPSTAT_INC(tcps_predack); 1718 1719 /* 1720 * "bad retransmit" recovery without timestamps. 1721 */ 1722 if ((to.to_flags & TOF_TS) == 0 && 1723 tp->t_rxtshift == 1 && 1724 tp->t_flags & TF_PREVVALID && 1725 (int)(ticks - tp->t_badrxtwin) < 0) { 1726 cc_cong_signal(tp, th, CC_RTO_ERR); 1727 } 1728 1729 /* 1730 * Recalculate the transmit timer / rtt. 1731 * 1732 * Some boxes send broken timestamp replies 1733 * during the SYN+ACK phase, ignore 1734 * timestamps of 0 or we could calculate a 1735 * huge RTT and blow up the retransmit timer. 1736 */ 1737 if ((to.to_flags & TOF_TS) != 0 && 1738 to.to_tsecr) { 1739 uint32_t t; 1740 1741 t = tcp_ts_getticks() - to.to_tsecr; 1742 if (!tp->t_rttlow || tp->t_rttlow > t) 1743 tp->t_rttlow = t; 1744 tcp_xmit_timer(tp, 1745 TCP_TS_TO_TICKS(t) + 1); 1746 } else if (tp->t_rtttime && 1747 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1748 if (!tp->t_rttlow || 1749 tp->t_rttlow > ticks - tp->t_rtttime) 1750 tp->t_rttlow = ticks - tp->t_rtttime; 1751 tcp_xmit_timer(tp, 1752 ticks - tp->t_rtttime); 1753 } 1754 acked = BYTES_THIS_ACK(tp, th); 1755 1756 #ifdef TCP_HHOOK 1757 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 1758 hhook_run_tcp_est_in(tp, th, &to); 1759 #endif 1760 1761 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 1762 TCPSTAT_ADD(tcps_rcvackbyte, acked); 1763 sbdrop(&so->so_snd, acked); 1764 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1765 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1766 tp->snd_recover = th->th_ack - 1; 1767 1768 /* 1769 * Let the congestion control algorithm update 1770 * congestion control related information. This 1771 * typically means increasing the congestion 1772 * window. 1773 */ 1774 cc_ack_received(tp, th, nsegs, CC_ACK); 1775 1776 tp->snd_una = th->th_ack; 1777 /* 1778 * Pull snd_wl2 up to prevent seq wrap relative 1779 * to th_ack. 1780 */ 1781 tp->snd_wl2 = th->th_ack; 1782 tp->t_dupacks = 0; 1783 m_freem(m); 1784 1785 /* 1786 * If all outstanding data are acked, stop 1787 * retransmit timer, otherwise restart timer 1788 * using current (possibly backed-off) value. 1789 * If process is waiting for space, 1790 * wakeup/selwakeup/signal. 
If data 1791 * are ready to send, let tcp_output 1792 * decide between more output or persist. 1793 */ 1794 #ifdef TCPDEBUG 1795 if (so->so_options & SO_DEBUG) 1796 tcp_trace(TA_INPUT, ostate, tp, 1797 (void *)tcp_saveipgen, 1798 &tcp_savetcp, 0); 1799 #endif 1800 TCP_PROBE3(debug__input, tp, th, m); 1801 if (tp->snd_una == tp->snd_max) 1802 tcp_timer_activate(tp, TT_REXMT, 0); 1803 else if (!tcp_timer_active(tp, TT_PERSIST)) 1804 tcp_timer_activate(tp, TT_REXMT, 1805 tp->t_rxtcur); 1806 sowwakeup(so); 1807 if (sbavail(&so->so_snd)) 1808 (void) tp->t_fb->tfb_tcp_output(tp); 1809 goto check_delack; 1810 } 1811 } else if (th->th_ack == tp->snd_una && 1812 tlen <= sbspace(&so->so_rcv)) { 1813 int newsize = 0; /* automatic sockbuf scaling */ 1814 1815 /* 1816 * This is a pure, in-sequence data packet with 1817 * nothing on the reassembly queue and we have enough 1818 * buffer space to take it. 1819 */ 1820 /* Clean receiver SACK report if present */ 1821 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1822 tcp_clean_sackreport(tp); 1823 TCPSTAT_INC(tcps_preddat); 1824 tp->rcv_nxt += tlen; 1825 /* 1826 * Pull snd_wl1 up to prevent seq wrap relative to 1827 * th_seq. 1828 */ 1829 tp->snd_wl1 = th->th_seq; 1830 /* 1831 * Pull rcv_up up to prevent seq wrap relative to 1832 * rcv_nxt. 1833 */ 1834 tp->rcv_up = tp->rcv_nxt; 1835 TCPSTAT_ADD(tcps_rcvpack, nsegs); 1836 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1837 #ifdef TCPDEBUG 1838 if (so->so_options & SO_DEBUG) 1839 tcp_trace(TA_INPUT, ostate, tp, 1840 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1841 #endif 1842 TCP_PROBE3(debug__input, tp, th, m); 1843 1844 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 1845 1846 /* Add data to socket buffer. */ 1847 SOCKBUF_LOCK(&so->so_rcv); 1848 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1849 m_freem(m); 1850 } else { 1851 /* 1852 * Set new socket buffer size. 1853 * Give up when limit is reached. 1854 */ 1855 if (newsize) 1856 if (!sbreserve_locked(&so->so_rcv, 1857 newsize, so, NULL)) 1858 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1859 m_adj(m, drop_hdrlen); /* delayed header drop */ 1860 sbappendstream_locked(&so->so_rcv, m, 0); 1861 } 1862 /* NB: sorwakeup_locked() does an implicit unlock. */ 1863 sorwakeup_locked(so); 1864 if (DELAY_ACK(tp, tlen)) { 1865 tp->t_flags |= TF_DELACK; 1866 } else { 1867 tp->t_flags |= TF_ACKNOW; 1868 tp->t_fb->tfb_tcp_output(tp); 1869 } 1870 goto check_delack; 1871 } 1872 } 1873 1874 /* 1875 * Calculate amount of space in receive window, 1876 * and then do TCP input processing. 1877 * Receive window is amount of space in rcv queue, 1878 * but not less than advertised window. 1879 */ 1880 win = sbspace(&so->so_rcv); 1881 if (win < 0) 1882 win = 0; 1883 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1884 1885 switch (tp->t_state) { 1886 1887 /* 1888 * If the state is SYN_RECEIVED: 1889 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1890 */ 1891 case TCPS_SYN_RECEIVED: 1892 if ((thflags & TH_ACK) && 1893 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1894 SEQ_GT(th->th_ack, tp->snd_max))) { 1895 rstreason = BANDLIM_RST_OPENPORT; 1896 goto dropwithreset; 1897 } 1898 if (IS_FASTOPEN(tp->t_flags)) { 1899 /* 1900 * When a TFO connection is in SYN_RECEIVED, the 1901 * only valid packets are the initial SYN, a 1902 * retransmit/copy of the initial SYN (possibly with 1903 * a subset of the original data), a valid ACK, a 1904 * FIN, or a RST. 
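/*
 * Sketch of the receive-window calculation above: the offered window is the
 * free space in the receive buffer, but it is never allowed to shrink below
 * what has already been advertised (rcv_adv - rcv_nxt), since TCP must not
 * retract an advertised window.  Plain integers stand in for the kernel's
 * socket-buffer accounting.
 */
#include <stdint.h>

static uint32_t
recv_window(int sockbuf_space, uint32_t rcv_adv, uint32_t rcv_nxt)
{
        int win = sockbuf_space;
        int already_advertised = (int)(rcv_adv - rcv_nxt);

        if (win < 0)
                win = 0;
        return ((uint32_t)(win > already_advertised ?
            win : already_advertised));
}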
1905 */ 1906 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1907 rstreason = BANDLIM_RST_OPENPORT; 1908 goto dropwithreset; 1909 } else if (thflags & TH_SYN) { 1910 /* non-initial SYN is ignored */ 1911 if ((tcp_timer_active(tp, TT_DELACK) || 1912 tcp_timer_active(tp, TT_REXMT))) 1913 goto drop; 1914 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1915 goto drop; 1916 } 1917 } 1918 break; 1919 1920 /* 1921 * If the state is SYN_SENT: 1922 * if seg contains a RST with valid ACK (SEQ.ACK has already 1923 * been verified), then drop the connection. 1924 * if seg contains a RST without an ACK, drop the seg. 1925 * if seg does not contain SYN, then drop the seg. 1926 * Otherwise this is an acceptable SYN segment 1927 * initialize tp->rcv_nxt and tp->irs 1928 * if seg contains ack then advance tp->snd_una 1929 * if seg contains an ECE and ECN support is enabled, the stream 1930 * is ECN capable. 1931 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1932 * arrange for segment to be acked (eventually) 1933 * continue processing rest of data/controls, beginning with URG 1934 */ 1935 case TCPS_SYN_SENT: 1936 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1937 TCP_PROBE5(connect__refused, NULL, tp, 1938 m, tp, th); 1939 tp = tcp_drop(tp, ECONNREFUSED); 1940 } 1941 if (thflags & TH_RST) 1942 goto drop; 1943 if (!(thflags & TH_SYN)) 1944 goto drop; 1945 1946 tp->irs = th->th_seq; 1947 tcp_rcvseqinit(tp); 1948 if (thflags & TH_ACK) { 1949 int tfo_partial_ack = 0; 1950 1951 TCPSTAT_INC(tcps_connects); 1952 soisconnected(so); 1953 #ifdef MAC 1954 mac_socketpeer_set_from_mbuf(m, so); 1955 #endif 1956 /* Do window scaling on this connection? */ 1957 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1958 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1959 tp->rcv_scale = tp->request_r_scale; 1960 } 1961 tp->rcv_adv += min(tp->rcv_wnd, 1962 TCP_MAXWIN << tp->rcv_scale); 1963 tp->snd_una++; /* SYN is acked */ 1964 /* 1965 * If not all the data that was sent in the TFO SYN 1966 * has been acked, resend the remainder right away. 1967 */ 1968 if (IS_FASTOPEN(tp->t_flags) && 1969 (tp->snd_una != tp->snd_max)) { 1970 tp->snd_nxt = th->th_ack; 1971 tfo_partial_ack = 1; 1972 } 1973 /* 1974 * If there's data, delay ACK; if there's also a FIN 1975 * ACKNOW will be turned on later. 1976 */ 1977 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack) 1978 tcp_timer_activate(tp, TT_DELACK, 1979 tcp_delacktime); 1980 else 1981 tp->t_flags |= TF_ACKNOW; 1982 1983 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 1984 (V_tcp_do_ecn == 1)) { 1985 tp->t_flags2 |= TF2_ECN_PERMIT; 1986 TCPSTAT_INC(tcps_ecn_shs); 1987 } 1988 1989 /* 1990 * Received <SYN,ACK> in SYN_SENT[*] state. 1991 * Transitions: 1992 * SYN_SENT --> ESTABLISHED 1993 * SYN_SENT* --> FIN_WAIT_1 1994 */ 1995 tp->t_starttime = ticks; 1996 if (tp->t_flags & TF_NEEDFIN) { 1997 tcp_state_change(tp, TCPS_FIN_WAIT_1); 1998 tp->t_flags &= ~TF_NEEDFIN; 1999 thflags &= ~TH_SYN; 2000 } else { 2001 tcp_state_change(tp, TCPS_ESTABLISHED); 2002 TCP_PROBE5(connect__established, NULL, tp, 2003 m, tp, th); 2004 cc_conn_init(tp); 2005 tcp_timer_activate(tp, TT_KEEP, 2006 TP_KEEPIDLE(tp)); 2007 } 2008 } else { 2009 /* 2010 * Received initial SYN in SYN-SENT[*] state => 2011 * simultaneous open. 2012 * If it succeeds, connection is * half-synchronized. 
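/*
 * The SEQ_LT/SEQ_GT/SEQ_LEQ/SEQ_GEQ tests used throughout this file compare
 * 32-bit sequence numbers in modular arithmetic, so they stay correct when
 * the sequence space wraps.  A self-contained restatement of the idea (the
 * real macros live in tcp_seq.h; these are illustrative only):
 */
#include <stdint.h>

static int seq_lt(uint32_t a, uint32_t b)  { return ((int32_t)(a - b) < 0); }
static int seq_leq(uint32_t a, uint32_t b) { return ((int32_t)(a - b) <= 0); }
static int seq_gt(uint32_t a, uint32_t b)  { return ((int32_t)(a - b) > 0); }
static int seq_geq(uint32_t a, uint32_t b) { return ((int32_t)(a - b) >= 0); }

/*
 * Example: an ACK acknowledges new data only if it falls in
 * (snd_una, snd_max]; the same test works across wraparound.
 */
static int
ack_advances(uint32_t th_ack, uint32_t snd_una, uint32_t snd_max)
{
        return (seq_gt(th_ack, snd_una) && seq_leq(th_ack, snd_max));
}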
2013 * Otherwise, do 3-way handshake: 2014 * SYN-SENT -> SYN-RECEIVED 2015 * SYN-SENT* -> SYN-RECEIVED* 2016 */ 2017 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 2018 tcp_timer_activate(tp, TT_REXMT, 0); 2019 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2020 } 2021 2022 INP_WLOCK_ASSERT(tp->t_inpcb); 2023 2024 /* 2025 * Advance th->th_seq to correspond to first data byte. 2026 * If data, trim to stay within window, 2027 * dropping FIN if necessary. 2028 */ 2029 th->th_seq++; 2030 if (tlen > tp->rcv_wnd) { 2031 todrop = tlen - tp->rcv_wnd; 2032 m_adj(m, -todrop); 2033 tlen = tp->rcv_wnd; 2034 thflags &= ~TH_FIN; 2035 TCPSTAT_INC(tcps_rcvpackafterwin); 2036 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2037 } 2038 tp->snd_wl1 = th->th_seq - 1; 2039 tp->rcv_up = th->th_seq; 2040 /* 2041 * Client side of transaction: already sent SYN and data. 2042 * If the remote host used T/TCP to validate the SYN, 2043 * our data will be ACK'd; if so, enter normal data segment 2044 * processing in the middle of step 5, ack processing. 2045 * Otherwise, goto step 6. 2046 */ 2047 if (thflags & TH_ACK) 2048 goto process_ACK; 2049 2050 goto step6; 2051 2052 /* 2053 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2054 * do normal processing. 2055 * 2056 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2057 */ 2058 case TCPS_LAST_ACK: 2059 case TCPS_CLOSING: 2060 break; /* continue normal processing */ 2061 } 2062 2063 /* 2064 * States other than LISTEN or SYN_SENT. 2065 * First check the RST flag and sequence number since reset segments 2066 * are exempt from the timestamp and connection count tests. This 2067 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2068 * below which allowed reset segments in half the sequence space 2069 * to fall though and be processed (which gives forged reset 2070 * segments with a random sequence number a 50 percent chance of 2071 * killing a connection). 2072 * Then check timestamp, if present. 2073 * Then check the connection count, if present. 2074 * Then check that at least some bytes of segment are within 2075 * receive window. If segment begins before rcv_nxt, 2076 * drop leading data (and SYN); if nothing left, just ack. 2077 */ 2078 if (thflags & TH_RST) { 2079 /* 2080 * RFC5961 Section 3.2 2081 * 2082 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2083 * - If RST is in window, we send challenge ACK. 2084 * 2085 * Note: to take into account delayed ACKs, we should 2086 * test against last_ack_sent instead of rcv_nxt. 2087 * Note 2: we handle special case of closed window, not 2088 * covered by the RFC. 2089 */ 2090 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2091 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2092 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2093 2094 KASSERT(tp->t_state != TCPS_SYN_SENT, 2095 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2096 __func__, th, tp)); 2097 2098 if (V_tcp_insecure_rst || 2099 tp->last_ack_sent == th->th_seq) { 2100 TCPSTAT_INC(tcps_drops); 2101 /* Drop the connection. */ 2102 switch (tp->t_state) { 2103 case TCPS_SYN_RECEIVED: 2104 so->so_error = ECONNREFUSED; 2105 goto close; 2106 case TCPS_ESTABLISHED: 2107 case TCPS_FIN_WAIT_1: 2108 case TCPS_FIN_WAIT_2: 2109 case TCPS_CLOSE_WAIT: 2110 case TCPS_CLOSING: 2111 case TCPS_LAST_ACK: 2112 so->so_error = ECONNRESET; 2113 close: 2114 /* FALLTHROUGH */ 2115 default: 2116 tp = tcp_close(tp); 2117 } 2118 } else { 2119 TCPSTAT_INC(tcps_badrst); 2120 /* Send challenge ACK. 
*/ 2121 tcp_respond(tp, mtod(m, void *), th, m, 2122 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2123 tp->last_ack_sent = tp->rcv_nxt; 2124 m = NULL; 2125 } 2126 } 2127 goto drop; 2128 } 2129 2130 /* 2131 * RFC5961 Section 4.2 2132 * Send challenge ACK for any SYN in synchronized state. 2133 */ 2134 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT && 2135 tp->t_state != TCPS_SYN_RECEIVED) { 2136 TCPSTAT_INC(tcps_badsyn); 2137 if (V_tcp_insecure_syn && 2138 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2139 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2140 tp = tcp_drop(tp, ECONNRESET); 2141 rstreason = BANDLIM_UNLIMITED; 2142 } else { 2143 /* Send challenge ACK. */ 2144 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2145 tp->snd_nxt, TH_ACK); 2146 tp->last_ack_sent = tp->rcv_nxt; 2147 m = NULL; 2148 } 2149 goto drop; 2150 } 2151 2152 /* 2153 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2154 * and it's less than ts_recent, drop it. 2155 */ 2156 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2157 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2158 2159 /* Check to see if ts_recent is over 24 days old. */ 2160 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2161 /* 2162 * Invalidate ts_recent. If this segment updates 2163 * ts_recent, the age will be reset later and ts_recent 2164 * will get a valid value. If it does not, setting 2165 * ts_recent to zero will at least satisfy the 2166 * requirement that zero be placed in the timestamp 2167 * echo reply when ts_recent isn't valid. The 2168 * age isn't reset until we get a valid ts_recent 2169 * because we don't want out-of-order segments to be 2170 * dropped when ts_recent is old. 2171 */ 2172 tp->ts_recent = 0; 2173 } else { 2174 TCPSTAT_INC(tcps_rcvduppack); 2175 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2176 TCPSTAT_INC(tcps_pawsdrop); 2177 if (tlen) 2178 goto dropafterack; 2179 goto drop; 2180 } 2181 } 2182 2183 /* 2184 * In the SYN-RECEIVED state, validate that the packet belongs to 2185 * this connection before trimming the data to fit the receive 2186 * window. Check the sequence number versus IRS since we know 2187 * the sequence numbers haven't wrapped. This is a partial fix 2188 * for the "LAND" DoS attack. 2189 */ 2190 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2191 rstreason = BANDLIM_RST_OPENPORT; 2192 goto dropwithreset; 2193 } 2194 2195 todrop = tp->rcv_nxt - th->th_seq; 2196 if (todrop > 0) { 2197 if (thflags & TH_SYN) { 2198 thflags &= ~TH_SYN; 2199 th->th_seq++; 2200 if (th->th_urp > 1) 2201 th->th_urp--; 2202 else 2203 thflags &= ~TH_URG; 2204 todrop--; 2205 } 2206 /* 2207 * Following if statement from Stevens, vol. 2, p. 960. 2208 */ 2209 if (todrop > tlen 2210 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2211 /* 2212 * Any valid FIN must be to the left of the window. 2213 * At this point the FIN must be a duplicate or out 2214 * of sequence; drop it. 2215 */ 2216 thflags &= ~TH_FIN; 2217 2218 /* 2219 * Send an ACK to resynchronize and drop any data. 2220 * But keep on processing for RST or ACK. 
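/*
 * Sketch of the PAWS test applied above (RFC 1323/7323), for segments that
 * carry a timestamp option: a segment whose timestamp is older than
 * ts_recent is presumed to be a delayed duplicate and is dropped, unless
 * ts_recent itself is stale (idle longer than roughly 24 days), in which
 * case ts_recent is invalidated instead.  The constant and types below are
 * illustrative stand-ins, not the kernel's definitions.
 */
#include <stdint.h>

#define PAWS_IDLE_MS    (24u * 24u * 60u * 60u * 1000u) /* ~24 days in ms */

enum paws_verdict { PAWS_PASS, PAWS_DROP, PAWS_RESET_TSRECENT };

static enum paws_verdict
paws_check(uint32_t tsval, uint32_t ts_recent, uint32_t ts_recent_age,
    uint32_t now_ticks)
{
        if (ts_recent == 0 || (int32_t)(tsval - ts_recent) >= 0)
                return (PAWS_PASS);             /* not older: accept */
        if (now_ticks - ts_recent_age > PAWS_IDLE_MS)
                return (PAWS_RESET_TSRECENT);   /* ts_recent too old to trust */
        return (PAWS_DROP);                     /* delayed duplicate */
}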
2221 */ 2222 tp->t_flags |= TF_ACKNOW; 2223 todrop = tlen; 2224 TCPSTAT_INC(tcps_rcvduppack); 2225 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2226 } else { 2227 TCPSTAT_INC(tcps_rcvpartduppack); 2228 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2229 } 2230 /* 2231 * DSACK - add SACK block for dropped range 2232 */ 2233 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) { 2234 tcp_update_sack_list(tp, th->th_seq, 2235 th->th_seq + todrop); 2236 /* 2237 * ACK now, as the next in-sequence segment 2238 * will clear the DSACK block again 2239 */ 2240 tp->t_flags |= TF_ACKNOW; 2241 } 2242 drop_hdrlen += todrop; /* drop from the top afterwards */ 2243 th->th_seq += todrop; 2244 tlen -= todrop; 2245 if (th->th_urp > todrop) 2246 th->th_urp -= todrop; 2247 else { 2248 thflags &= ~TH_URG; 2249 th->th_urp = 0; 2250 } 2251 } 2252 2253 /* 2254 * If new data are received on a connection after the 2255 * user processes are gone, then RST the other end. 2256 */ 2257 if ((so->so_state & SS_NOFDREF) && 2258 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2259 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2260 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2261 "after socket was closed, " 2262 "sending RST and removing tcpcb\n", 2263 s, __func__, tcpstates[tp->t_state], tlen); 2264 free(s, M_TCPLOG); 2265 } 2266 tp = tcp_close(tp); 2267 TCPSTAT_INC(tcps_rcvafterclose); 2268 rstreason = BANDLIM_UNLIMITED; 2269 goto dropwithreset; 2270 } 2271 2272 /* 2273 * If segment ends after window, drop trailing data 2274 * (and PUSH and FIN); if nothing left, just ACK. 2275 */ 2276 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2277 if (todrop > 0) { 2278 TCPSTAT_INC(tcps_rcvpackafterwin); 2279 if (todrop >= tlen) { 2280 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2281 /* 2282 * If window is closed can only take segments at 2283 * window edge, and have to drop data and PUSH from 2284 * incoming segments. Continue processing, but 2285 * remember to ack. Otherwise, drop segment 2286 * and ack. 2287 */ 2288 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2289 tp->t_flags |= TF_ACKNOW; 2290 TCPSTAT_INC(tcps_rcvwinprobe); 2291 } else 2292 goto dropafterack; 2293 } else 2294 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2295 m_adj(m, -todrop); 2296 tlen -= todrop; 2297 thflags &= ~(TH_PUSH|TH_FIN); 2298 } 2299 2300 /* 2301 * If last ACK falls within this segment's sequence numbers, 2302 * record its timestamp. 2303 * NOTE: 2304 * 1) That the test incorporates suggestions from the latest 2305 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2306 * 2) That updating only on newer timestamps interferes with 2307 * our earlier PAWS tests, so this check should be solely 2308 * predicated on the sequence space of this segment. 2309 * 3) That we modify the segment boundary check to be 2310 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2311 * instead of RFC1323's 2312 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2313 * This modified check allows us to overcome RFC1323's 2314 * limitations as described in Stevens TCP/IP Illustrated 2315 * Vol. 2 p.869. In such cases, we can still calculate the 2316 * RTT correctly when RCV.NXT == Last.ACK.Sent. 
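/*
 * The recording rule described in the note above, as a stand-alone
 * predicate: record the peer's timestamp only when our last ACK falls
 * inside this segment's sequence range, using "<=" on the right edge so
 * that a segment of zero data length at RCV.NXT still qualifies.  SYN and
 * FIN each occupy one sequence number, hence the extra increment.
 * (Illustrative form of the test that follows.)
 */
#include <stdint.h>

static int
should_record_tsval(uint32_t seg_seq, uint32_t seg_len, int has_syn_or_fin,
    uint32_t last_ack_sent)
{
        uint32_t right_edge = seg_seq + seg_len + (has_syn_or_fin ? 1 : 0);

        return ((int32_t)(seg_seq - last_ack_sent) <= 0 &&     /* SEG.SEQ <= Last.ACK.Sent */
            (int32_t)(last_ack_sent - right_edge) <= 0);       /* Last.ACK.Sent <= right edge */
}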
2317 */ 2318 if ((to.to_flags & TOF_TS) != 0 && 2319 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2320 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2321 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2322 tp->ts_recent_age = tcp_ts_getticks(); 2323 tp->ts_recent = to.to_tsval; 2324 } 2325 2326 /* 2327 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2328 * flag is on (half-synchronized state), then queue data for 2329 * later processing; else drop segment and return. 2330 */ 2331 if ((thflags & TH_ACK) == 0) { 2332 if (tp->t_state == TCPS_SYN_RECEIVED || 2333 (tp->t_flags & TF_NEEDSYN)) { 2334 if (tp->t_state == TCPS_SYN_RECEIVED && 2335 IS_FASTOPEN(tp->t_flags)) { 2336 tp->snd_wnd = tiwin; 2337 cc_conn_init(tp); 2338 } 2339 goto step6; 2340 } else if (tp->t_flags & TF_ACKNOW) 2341 goto dropafterack; 2342 else 2343 goto drop; 2344 } 2345 2346 /* 2347 * Ack processing. 2348 */ 2349 switch (tp->t_state) { 2350 2351 /* 2352 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2353 * ESTABLISHED state and continue processing. 2354 * The ACK was checked above. 2355 */ 2356 case TCPS_SYN_RECEIVED: 2357 2358 TCPSTAT_INC(tcps_connects); 2359 soisconnected(so); 2360 /* Do window scaling? */ 2361 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2362 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2363 tp->rcv_scale = tp->request_r_scale; 2364 } 2365 tp->snd_wnd = tiwin; 2366 /* 2367 * Make transitions: 2368 * SYN-RECEIVED -> ESTABLISHED 2369 * SYN-RECEIVED* -> FIN-WAIT-1 2370 */ 2371 tp->t_starttime = ticks; 2372 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 2373 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2374 tp->t_tfo_pending = NULL; 2375 2376 /* 2377 * Account for the ACK of our SYN prior to 2378 * regular ACK processing below. 2379 */ 2380 tp->snd_una++; 2381 } 2382 if (tp->t_flags & TF_NEEDFIN) { 2383 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2384 tp->t_flags &= ~TF_NEEDFIN; 2385 } else { 2386 tcp_state_change(tp, TCPS_ESTABLISHED); 2387 TCP_PROBE5(accept__established, NULL, tp, 2388 m, tp, th); 2389 /* 2390 * TFO connections call cc_conn_init() during SYN 2391 * processing. Calling it again here for such 2392 * connections is not harmless as it would undo the 2393 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2394 * is retransmitted. 2395 */ 2396 if (!IS_FASTOPEN(tp->t_flags)) 2397 cc_conn_init(tp); 2398 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2399 } 2400 /* 2401 * If segment contains data or ACK, will call tcp_reass() 2402 * later; if not, do so now to pass queued data to user. 2403 */ 2404 if (tlen == 0 && (thflags & TH_FIN) == 0) 2405 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 2406 (struct mbuf *)0); 2407 tp->snd_wl1 = th->th_seq - 1; 2408 /* FALLTHROUGH */ 2409 2410 /* 2411 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2412 * ACKs. If the ack is in the range 2413 * tp->snd_una < th->th_ack <= tp->snd_max 2414 * then advance tp->snd_una to th->th_ack and drop 2415 * data from the retransmission queue. If this ACK reflects 2416 * more up to date window information we update our window information. 
2417 */ 2418 case TCPS_ESTABLISHED: 2419 case TCPS_FIN_WAIT_1: 2420 case TCPS_FIN_WAIT_2: 2421 case TCPS_CLOSE_WAIT: 2422 case TCPS_CLOSING: 2423 case TCPS_LAST_ACK: 2424 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2425 TCPSTAT_INC(tcps_rcvacktoomuch); 2426 goto dropafterack; 2427 } 2428 if ((tp->t_flags & TF_SACK_PERMIT) && 2429 ((to.to_flags & TOF_SACK) || 2430 !TAILQ_EMPTY(&tp->snd_holes))) 2431 sack_changed = tcp_sack_doack(tp, &to, th->th_ack); 2432 else 2433 /* 2434 * Reset the value so that previous (valid) value 2435 * from the last ack with SACK doesn't get used. 2436 */ 2437 tp->sackhint.sacked_bytes = 0; 2438 2439 #ifdef TCP_HHOOK 2440 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2441 hhook_run_tcp_est_in(tp, th, &to); 2442 #endif 2443 2444 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2445 u_int maxseg; 2446 2447 maxseg = tcp_maxseg(tp); 2448 if (tlen == 0 && 2449 (tiwin == tp->snd_wnd || 2450 (tp->t_flags & TF_SACK_PERMIT))) { 2451 /* 2452 * If this is the first time we've seen a 2453 * FIN from the remote, this is not a 2454 * duplicate and it needs to be processed 2455 * normally. This happens during a 2456 * simultaneous close. 2457 */ 2458 if ((thflags & TH_FIN) && 2459 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2460 tp->t_dupacks = 0; 2461 break; 2462 } 2463 TCPSTAT_INC(tcps_rcvdupack); 2464 /* 2465 * If we have outstanding data (other than 2466 * a window probe), this is a completely 2467 * duplicate ack (ie, window info didn't 2468 * change and FIN isn't set), 2469 * the ack is the biggest we've 2470 * seen and we've seen exactly our rexmt 2471 * threshold of them, assume a packet 2472 * has been dropped and retransmit it. 2473 * Kludge snd_nxt & the congestion 2474 * window so we send only this one 2475 * packet. 2476 * 2477 * We know we're losing at the current 2478 * window size so do congestion avoidance 2479 * (set ssthresh to half the current window 2480 * and pull our congestion window back to 2481 * the new ssthresh). 2482 * 2483 * Dup acks mean that packets have left the 2484 * network (they're now cached at the receiver) 2485 * so bump cwnd by the amount in the receiver 2486 * to keep a constant cwnd packets in the 2487 * network. 2488 * 2489 * When using TCP ECN, notify the peer that 2490 * we reduced the cwnd. 2491 */ 2492 /* 2493 * Following 2 kinds of acks should not affect 2494 * dupack counting: 2495 * 1) Old acks 2496 * 2) Acks with SACK but without any new SACK 2497 * information in them. These could result from 2498 * any anomaly in the network like a switch 2499 * duplicating packets or a possible DoS attack. 2500 */ 2501 if (th->th_ack != tp->snd_una || 2502 ((tp->t_flags & TF_SACK_PERMIT) && 2503 !sack_changed)) 2504 break; 2505 else if (!tcp_timer_active(tp, TT_REXMT)) 2506 tp->t_dupacks = 0; 2507 else if (++tp->t_dupacks > tcprexmtthresh || 2508 IN_FASTRECOVERY(tp->t_flags)) { 2509 cc_ack_received(tp, th, nsegs, 2510 CC_DUPACK); 2511 if ((tp->t_flags & TF_SACK_PERMIT) && 2512 IN_FASTRECOVERY(tp->t_flags)) { 2513 int awnd; 2514 2515 /* 2516 * Compute the amount of data in flight first. 2517 * We can inject new data into the pipe iff 2518 * we have less than 1/2 the original window's 2519 * worth of data in flight. 
2520 */ 2521 if (V_tcp_do_rfc6675_pipe) 2522 awnd = tcp_compute_pipe(tp); 2523 else 2524 awnd = (tp->snd_nxt - tp->snd_fack) + 2525 tp->sackhint.sack_bytes_rexmit; 2526 2527 if (awnd < tp->snd_ssthresh) { 2528 tp->snd_cwnd += maxseg; 2529 if (tp->snd_cwnd > tp->snd_ssthresh) 2530 tp->snd_cwnd = tp->snd_ssthresh; 2531 } 2532 } else 2533 tp->snd_cwnd += maxseg; 2534 (void) tp->t_fb->tfb_tcp_output(tp); 2535 goto drop; 2536 } else if (tp->t_dupacks == tcprexmtthresh) { 2537 tcp_seq onxt = tp->snd_nxt; 2538 2539 /* 2540 * If we're doing sack, check to 2541 * see if we're already in sack 2542 * recovery. If we're not doing sack, 2543 * check to see if we're in newreno 2544 * recovery. 2545 */ 2546 if (tp->t_flags & TF_SACK_PERMIT) { 2547 if (IN_FASTRECOVERY(tp->t_flags)) { 2548 tp->t_dupacks = 0; 2549 break; 2550 } 2551 } else { 2552 if (SEQ_LEQ(th->th_ack, 2553 tp->snd_recover)) { 2554 tp->t_dupacks = 0; 2555 break; 2556 } 2557 } 2558 /* Congestion signal before ack. */ 2559 cc_cong_signal(tp, th, CC_NDUPACK); 2560 cc_ack_received(tp, th, nsegs, 2561 CC_DUPACK); 2562 tcp_timer_activate(tp, TT_REXMT, 0); 2563 tp->t_rtttime = 0; 2564 if (tp->t_flags & TF_SACK_PERMIT) { 2565 TCPSTAT_INC( 2566 tcps_sack_recovery_episode); 2567 tp->sack_newdata = tp->snd_nxt; 2568 tp->snd_cwnd = maxseg; 2569 (void) tp->t_fb->tfb_tcp_output(tp); 2570 goto drop; 2571 } 2572 tp->snd_nxt = th->th_ack; 2573 tp->snd_cwnd = maxseg; 2574 (void) tp->t_fb->tfb_tcp_output(tp); 2575 KASSERT(tp->snd_limited <= 2, 2576 ("%s: tp->snd_limited too big", 2577 __func__)); 2578 tp->snd_cwnd = tp->snd_ssthresh + 2579 maxseg * 2580 (tp->t_dupacks - tp->snd_limited); 2581 if (SEQ_GT(onxt, tp->snd_nxt)) 2582 tp->snd_nxt = onxt; 2583 goto drop; 2584 } else if (V_tcp_do_rfc3042) { 2585 /* 2586 * Process first and second duplicate 2587 * ACKs. Each indicates a segment 2588 * leaving the network, creating room 2589 * for more. Make sure we can send a 2590 * packet on reception of each duplicate 2591 * ACK by increasing snd_cwnd by one 2592 * segment. Restore the original 2593 * snd_cwnd after packet transmission. 2594 */ 2595 cc_ack_received(tp, th, nsegs, 2596 CC_DUPACK); 2597 uint32_t oldcwnd = tp->snd_cwnd; 2598 tcp_seq oldsndmax = tp->snd_max; 2599 u_int sent; 2600 int avail; 2601 2602 KASSERT(tp->t_dupacks == 1 || 2603 tp->t_dupacks == 2, 2604 ("%s: dupacks not 1 or 2", 2605 __func__)); 2606 if (tp->t_dupacks == 1) 2607 tp->snd_limited = 0; 2608 tp->snd_cwnd = 2609 (tp->snd_nxt - tp->snd_una) + 2610 (tp->t_dupacks - tp->snd_limited) * 2611 maxseg; 2612 /* 2613 * Only call tcp_output when there 2614 * is new data available to be sent. 2615 * Otherwise we would send pure ACKs. 2616 */ 2617 SOCKBUF_LOCK(&so->so_snd); 2618 avail = sbavail(&so->so_snd) - 2619 (tp->snd_nxt - tp->snd_una); 2620 SOCKBUF_UNLOCK(&so->so_snd); 2621 if (avail > 0) 2622 (void) tp->t_fb->tfb_tcp_output(tp); 2623 sent = tp->snd_max - oldsndmax; 2624 if (sent > maxseg) { 2625 KASSERT((tp->t_dupacks == 2 && 2626 tp->snd_limited == 0) || 2627 (sent == maxseg + 1 && 2628 tp->t_flags & TF_SENTFIN), 2629 ("%s: sent too much", 2630 __func__)); 2631 tp->snd_limited = 2; 2632 } else if (sent > 0) 2633 ++tp->snd_limited; 2634 tp->snd_cwnd = oldcwnd; 2635 goto drop; 2636 } 2637 } 2638 break; 2639 } else { 2640 /* 2641 * This ack is advancing the left edge, reset the 2642 * counter. 2643 */ 2644 tp->t_dupacks = 0; 2645 /* 2646 * If this ack also has new SACK info, increment the 2647 * counter as per rfc6675. 
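/*
 * Sketch of the two flight-size ("pipe") estimates chosen between above.
 * With rfc6675_pipe enabled, the pipe is what has been sent but neither
 * cumulatively nor selectively acknowledged, plus bytes retransmitted from
 * SACK holes; otherwise the older estimate based on snd_fack is used.
 * Field names mirror the tcpcb/sackhint fields referenced above, but the
 * struct itself is a stand-in for illustration.
 */
#include <stdint.h>

struct pipe_view {
        uint32_t snd_max, snd_una, snd_nxt, snd_fack;
        uint32_t sack_bytes_rexmit;     /* retransmitted from SACK holes */
        uint32_t sacked_bytes;          /* bytes reported SACKed by peer */
};

static uint32_t
pipe_rfc6675(const struct pipe_view *v)
{
        return (v->snd_max - v->snd_una +
            v->sack_bytes_rexmit - v->sacked_bytes);
}

static uint32_t
pipe_legacy(const struct pipe_view *v)
{
        return ((v->snd_nxt - v->snd_fack) + v->sack_bytes_rexmit);
}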
2648 */ 2649 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed) 2650 tp->t_dupacks++; 2651 } 2652 2653 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2654 ("%s: th_ack <= snd_una", __func__)); 2655 2656 /* 2657 * If the congestion window was inflated to account 2658 * for the other side's cached packets, retract it. 2659 */ 2660 if (IN_FASTRECOVERY(tp->t_flags)) { 2661 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2662 if (tp->t_flags & TF_SACK_PERMIT) 2663 tcp_sack_partialack(tp, th); 2664 else 2665 tcp_newreno_partial_ack(tp, th); 2666 } else 2667 cc_post_recovery(tp, th); 2668 } 2669 /* 2670 * If we reach this point, ACK is not a duplicate, 2671 * i.e., it ACKs something we sent. 2672 */ 2673 if (tp->t_flags & TF_NEEDSYN) { 2674 /* 2675 * T/TCP: Connection was half-synchronized, and our 2676 * SYN has been ACK'd (so connection is now fully 2677 * synchronized). Go to non-starred state, 2678 * increment snd_una for ACK of SYN, and check if 2679 * we can do window scaling. 2680 */ 2681 tp->t_flags &= ~TF_NEEDSYN; 2682 tp->snd_una++; 2683 /* Do window scaling? */ 2684 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2685 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2686 tp->rcv_scale = tp->request_r_scale; 2687 /* Send window already scaled. */ 2688 } 2689 } 2690 2691 process_ACK: 2692 INP_WLOCK_ASSERT(tp->t_inpcb); 2693 2694 acked = BYTES_THIS_ACK(tp, th); 2695 KASSERT(acked >= 0, ("%s: acked unexepectedly negative " 2696 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__, 2697 tp->snd_una, th->th_ack, tp, m)); 2698 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 2699 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2700 2701 /* 2702 * If we just performed our first retransmit, and the ACK 2703 * arrives within our recovery window, then it was a mistake 2704 * to do the retransmit in the first place. Recover our 2705 * original cwnd and ssthresh, and proceed to transmit where 2706 * we left off. 2707 */ 2708 if (tp->t_rxtshift == 1 && 2709 tp->t_flags & TF_PREVVALID && 2710 tp->t_badrxtwin && 2711 SEQ_LT(to.to_tsecr, tp->t_badrxtwin)) 2712 cc_cong_signal(tp, th, CC_RTO_ERR); 2713 2714 /* 2715 * If we have a timestamp reply, update smoothed 2716 * round trip time. If no timestamp is present but 2717 * transmit timer is running and timed sequence 2718 * number was acked, update smoothed round trip time. 2719 * Since we now have an rtt measurement, cancel the 2720 * timer backoff (cf., Phil Karn's retransmit alg.). 2721 * Recompute the initial retransmit timer. 2722 * 2723 * Some boxes send broken timestamp replies 2724 * during the SYN+ACK phase, ignore 2725 * timestamps of 0 or we could calculate a 2726 * huge RTT and blow up the retransmit timer. 2727 */ 2728 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2729 uint32_t t; 2730 2731 t = tcp_ts_getticks() - to.to_tsecr; 2732 if (!tp->t_rttlow || tp->t_rttlow > t) 2733 tp->t_rttlow = t; 2734 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2735 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2736 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2737 tp->t_rttlow = ticks - tp->t_rtttime; 2738 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2739 } 2740 2741 /* 2742 * If all outstanding data is acked, stop retransmit 2743 * timer and remember to restart (more output or persist). 2744 * If there is more data to be acked, restart retransmit 2745 * timer, using current (possibly backed-off) value. 
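/*
 * The check above detects a spurious retransmission ("bad retransmit"): if
 * exactly one RTO retransmission has occurred and the echoed timestamp on
 * this ACK falls before the recorded deadline (t_badrxtwin), the ACK must
 * be for the original transmission, so the congestion response is undone
 * via CC_RTO_ERR.  Restated as a predicate (illustrative only):
 */
#include <stdint.h>

static int
rto_was_spurious(int rxtshift, int prev_state_valid, uint32_t badrxtwin,
    uint32_t tsecr)
{
        return (rxtshift == 1 && prev_state_valid && badrxtwin != 0 &&
            (int32_t)(tsecr - badrxtwin) < 0);  /* SEQ_LT-style compare */
}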
2746 */ 2747 if (th->th_ack == tp->snd_max) { 2748 tcp_timer_activate(tp, TT_REXMT, 0); 2749 needoutput = 1; 2750 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2751 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2752 2753 /* 2754 * If no data (only SYN) was ACK'd, 2755 * skip rest of ACK processing. 2756 */ 2757 if (acked == 0) 2758 goto step6; 2759 2760 /* 2761 * Let the congestion control algorithm update congestion 2762 * control related information. This typically means increasing 2763 * the congestion window. 2764 */ 2765 cc_ack_received(tp, th, nsegs, CC_ACK); 2766 2767 SOCKBUF_LOCK(&so->so_snd); 2768 if (acked > sbavail(&so->so_snd)) { 2769 if (tp->snd_wnd >= sbavail(&so->so_snd)) 2770 tp->snd_wnd -= sbavail(&so->so_snd); 2771 else 2772 tp->snd_wnd = 0; 2773 mfree = sbcut_locked(&so->so_snd, 2774 (int)sbavail(&so->so_snd)); 2775 ourfinisacked = 1; 2776 } else { 2777 mfree = sbcut_locked(&so->so_snd, acked); 2778 if (tp->snd_wnd >= (uint32_t) acked) 2779 tp->snd_wnd -= acked; 2780 else 2781 tp->snd_wnd = 0; 2782 ourfinisacked = 0; 2783 } 2784 /* NB: sowwakeup_locked() does an implicit unlock. */ 2785 sowwakeup_locked(so); 2786 m_freem(mfree); 2787 /* Detect una wraparound. */ 2788 if (!IN_RECOVERY(tp->t_flags) && 2789 SEQ_GT(tp->snd_una, tp->snd_recover) && 2790 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2791 tp->snd_recover = th->th_ack - 1; 2792 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2793 if (IN_RECOVERY(tp->t_flags) && 2794 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2795 EXIT_RECOVERY(tp->t_flags); 2796 } 2797 tp->snd_una = th->th_ack; 2798 if (tp->t_flags & TF_SACK_PERMIT) { 2799 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2800 tp->snd_recover = tp->snd_una; 2801 } 2802 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2803 tp->snd_nxt = tp->snd_una; 2804 2805 switch (tp->t_state) { 2806 2807 /* 2808 * In FIN_WAIT_1 STATE in addition to the processing 2809 * for the ESTABLISHED state if our FIN is now acknowledged 2810 * then enter FIN_WAIT_2. 2811 */ 2812 case TCPS_FIN_WAIT_1: 2813 if (ourfinisacked) { 2814 /* 2815 * If we can't receive any more 2816 * data, then closing user can proceed. 2817 * Starting the timer is contrary to the 2818 * specification, but if we don't get a FIN 2819 * we'll hang forever. 2820 * 2821 * XXXjl: 2822 * we should release the tp also, and use a 2823 * compressed state. 2824 */ 2825 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2826 soisdisconnected(so); 2827 tcp_timer_activate(tp, TT_2MSL, 2828 (tcp_fast_finwait2_recycle ? 2829 tcp_finwait2_timeout : 2830 TP_MAXIDLE(tp))); 2831 } 2832 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2833 } 2834 break; 2835 2836 /* 2837 * In CLOSING STATE in addition to the processing for 2838 * the ESTABLISHED state if the ACK acknowledges our FIN 2839 * then enter the TIME-WAIT state, otherwise ignore 2840 * the segment. 2841 */ 2842 case TCPS_CLOSING: 2843 if (ourfinisacked) { 2844 tcp_twstart(tp); 2845 m_freem(m); 2846 return; 2847 } 2848 break; 2849 2850 /* 2851 * In LAST_ACK, we may still be waiting for data to drain 2852 * and/or to be acked, as well as for the ack of our FIN. 2853 * If our FIN is now acknowledged, delete the TCB, 2854 * enter the closed state and return. 2855 */ 2856 case TCPS_LAST_ACK: 2857 if (ourfinisacked) { 2858 tp = tcp_close(tp); 2859 goto drop; 2860 } 2861 break; 2862 } 2863 } 2864 2865 step6: 2866 INP_WLOCK_ASSERT(tp->t_inpcb); 2867 2868 /* 2869 * Update window information. 2870 * Don't look at window if no ACK: TAC's send garbage on first SYN. 
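/*
 * The freshness test that follows, restated: assuming the segment carries
 * an ACK, it may update the send window only if it brings strictly newer
 * information, judged first by sequence number (snd_wl1), then by ACK
 * number (snd_wl2), and finally by a strictly larger window for an
 * otherwise identical segment.  Illustrative helper; comparisons are
 * modular as elsewhere in this file.
 */
#include <stdint.h>

static int
window_update_is_fresh(uint32_t seg_seq, uint32_t seg_ack, uint32_t seg_wnd,
    uint32_t snd_wl1, uint32_t snd_wl2, uint32_t snd_wnd)
{
        if ((int32_t)(snd_wl1 - seg_seq) < 0)           /* newer sequence */
                return (1);
        if (snd_wl1 == seg_seq) {
                if ((int32_t)(snd_wl2 - seg_ack) < 0)   /* newer ACK */
                        return (1);
                if (snd_wl2 == seg_ack && seg_wnd > snd_wnd)
                        return (1);                     /* pure window update */
        }
        return (0);
}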
2871 */ 2872 if ((thflags & TH_ACK) && 2873 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2874 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2875 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2876 /* keep track of pure window updates */ 2877 if (tlen == 0 && 2878 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2879 TCPSTAT_INC(tcps_rcvwinupd); 2880 tp->snd_wnd = tiwin; 2881 tp->snd_wl1 = th->th_seq; 2882 tp->snd_wl2 = th->th_ack; 2883 if (tp->snd_wnd > tp->max_sndwnd) 2884 tp->max_sndwnd = tp->snd_wnd; 2885 needoutput = 1; 2886 } 2887 2888 /* 2889 * Process segments with URG. 2890 */ 2891 if ((thflags & TH_URG) && th->th_urp && 2892 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2893 /* 2894 * This is a kludge, but if we receive and accept 2895 * random urgent pointers, we'll crash in 2896 * soreceive. It's hard to imagine someone 2897 * actually wanting to send this much urgent data. 2898 */ 2899 SOCKBUF_LOCK(&so->so_rcv); 2900 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 2901 th->th_urp = 0; /* XXX */ 2902 thflags &= ~TH_URG; /* XXX */ 2903 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2904 goto dodata; /* XXX */ 2905 } 2906 /* 2907 * If this segment advances the known urgent pointer, 2908 * then mark the data stream. This should not happen 2909 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2910 * a FIN has been received from the remote side. 2911 * In these states we ignore the URG. 2912 * 2913 * According to RFC961 (Assigned Protocols), 2914 * the urgent pointer points to the last octet 2915 * of urgent data. We continue, however, 2916 * to consider it to indicate the first octet 2917 * of data past the urgent section as the original 2918 * spec states (in one of two places). 2919 */ 2920 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2921 tp->rcv_up = th->th_seq + th->th_urp; 2922 so->so_oobmark = sbavail(&so->so_rcv) + 2923 (tp->rcv_up - tp->rcv_nxt) - 1; 2924 if (so->so_oobmark == 0) 2925 so->so_rcv.sb_state |= SBS_RCVATMARK; 2926 sohasoutofband(so); 2927 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2928 } 2929 SOCKBUF_UNLOCK(&so->so_rcv); 2930 /* 2931 * Remove out of band data so doesn't get presented to user. 2932 * This can happen independent of advancing the URG pointer, 2933 * but if two URG's are pending at once, some out-of-band 2934 * data may creep in... ick. 2935 */ 2936 if (th->th_urp <= (uint32_t)tlen && 2937 !(so->so_options & SO_OOBINLINE)) { 2938 /* hdr drop is delayed */ 2939 tcp_pulloutofband(so, th, m, drop_hdrlen); 2940 } 2941 } else { 2942 /* 2943 * If no out of band data is expected, 2944 * pull receive urgent pointer along 2945 * with the receive window. 2946 */ 2947 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2948 tp->rcv_up = tp->rcv_nxt; 2949 } 2950 dodata: /* XXX */ 2951 INP_WLOCK_ASSERT(tp->t_inpcb); 2952 2953 /* 2954 * Process the segment text, merging it into the TCP sequencing queue, 2955 * and arranging for acknowledgment of receipt if necessary. 2956 * This process logically involves adjusting tp->rcv_wnd as data 2957 * is presented to the user (this happens in tcp_usrreq.c, 2958 * case PRU_RCVD). If a FIN has already been received on this 2959 * connection then we just ignore the text. 
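/*
 * Sketch of the delivery decision made below: a segment that is exactly
 * next in sequence, with an empty reassembly queue, on a connection that is
 * established (or completing a TFO handshake) is appended straight to the
 * socket buffer and may be delay-ACKed; anything else goes through
 * tcp_reass() and forces an immediate ACK so the peer's fast retransmit can
 * engage.  The enum and parameter names are illustrative.
 */
enum deliver_how {
        DELIVER_APPEND_DELACK,
        DELIVER_APPEND_ACKNOW,
        DELIVER_REASS_ACKNOW
};

static enum deliver_how
classify_data_segment(unsigned int seg_seq, unsigned int rcv_nxt,
    int reass_empty, int have_established, int tfo_syn, int delay_ack_ok)
{
        if (seg_seq == rcv_nxt && reass_empty &&
            (have_established || tfo_syn))
                return ((delay_ack_ok || tfo_syn) ?
                    DELIVER_APPEND_DELACK : DELIVER_APPEND_ACKNOW);
        return (DELIVER_REASS_ACKNOW);
}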
2960 */ 2961 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 2962 IS_FASTOPEN(tp->t_flags)); 2963 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 2964 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2965 tcp_seq save_start = th->th_seq; 2966 tcp_seq save_rnxt = tp->rcv_nxt; 2967 int save_tlen = tlen; 2968 m_adj(m, drop_hdrlen); /* delayed header drop */ 2969 /* 2970 * Insert segment which includes th into TCP reassembly queue 2971 * with control block tp. Set thflags to whether reassembly now 2972 * includes a segment with FIN. This handles the common case 2973 * inline (segment is the next to be received on an established 2974 * connection, and the queue is empty), avoiding linkage into 2975 * and removal from the queue and repetition of various 2976 * conversions. 2977 * Set DELACK for segments received in order, but ack 2978 * immediately when segments are out of order (so 2979 * fast retransmit can work). 2980 */ 2981 if (th->th_seq == tp->rcv_nxt && 2982 SEGQ_EMPTY(tp) && 2983 (TCPS_HAVEESTABLISHED(tp->t_state) || 2984 tfo_syn)) { 2985 if (DELAY_ACK(tp, tlen) || tfo_syn) 2986 tp->t_flags |= TF_DELACK; 2987 else 2988 tp->t_flags |= TF_ACKNOW; 2989 tp->rcv_nxt += tlen; 2990 thflags = th->th_flags & TH_FIN; 2991 TCPSTAT_INC(tcps_rcvpack); 2992 TCPSTAT_ADD(tcps_rcvbyte, tlen); 2993 SOCKBUF_LOCK(&so->so_rcv); 2994 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2995 m_freem(m); 2996 else 2997 sbappendstream_locked(&so->so_rcv, m, 0); 2998 /* NB: sorwakeup_locked() does an implicit unlock. */ 2999 sorwakeup_locked(so); 3000 } else { 3001 /* 3002 * XXX: Due to the header drop above "th" is 3003 * theoretically invalid by now. Fortunately 3004 * m_adj() doesn't actually frees any mbufs 3005 * when trimming from the head. 3006 */ 3007 tcp_seq temp = save_start; 3008 thflags = tcp_reass(tp, th, &temp, &tlen, m); 3009 tp->t_flags |= TF_ACKNOW; 3010 } 3011 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) { 3012 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 3013 /* 3014 * DSACK actually handled in the fastpath 3015 * above. 3016 */ 3017 tcp_update_sack_list(tp, save_start, 3018 save_start + save_tlen); 3019 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 3020 if ((tp->rcv_numsacks >= 1) && 3021 (tp->sackblks[0].end == save_start)) { 3022 /* 3023 * Partial overlap, recorded at todrop 3024 * above. 3025 */ 3026 tcp_update_sack_list(tp, 3027 tp->sackblks[0].start, 3028 tp->sackblks[0].end); 3029 } else { 3030 tcp_update_dsack_list(tp, save_start, 3031 save_start + save_tlen); 3032 } 3033 } else if (tlen >= save_tlen) { 3034 /* Update of sackblks. */ 3035 tcp_update_dsack_list(tp, save_start, 3036 save_start + save_tlen); 3037 } else if (tlen > 0) { 3038 tcp_update_dsack_list(tp, save_start, 3039 save_start + tlen); 3040 } 3041 } 3042 #if 0 3043 /* 3044 * Note the amount of data that peer has sent into 3045 * our window, in order to estimate the sender's 3046 * buffer size. 3047 * XXX: Unused. 3048 */ 3049 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3050 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3051 else 3052 len = so->so_rcv.sb_hiwat; 3053 #endif 3054 } else { 3055 m_freem(m); 3056 thflags &= ~TH_FIN; 3057 } 3058 3059 /* 3060 * If FIN is received ACK the FIN and let the user know 3061 * that the connection is closing. 3062 */ 3063 if (thflags & TH_FIN) { 3064 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3065 socantrcvmore(so); 3066 /* 3067 * If connection is half-synchronized 3068 * (ie NEEDSYN flag on) then delay ACK, 3069 * so it may be piggybacked when SYN is sent. 
3070 * Otherwise, since we received a FIN then no 3071 * more input can be expected, send ACK now. 3072 */ 3073 if (tp->t_flags & TF_NEEDSYN) 3074 tp->t_flags |= TF_DELACK; 3075 else 3076 tp->t_flags |= TF_ACKNOW; 3077 tp->rcv_nxt++; 3078 } 3079 switch (tp->t_state) { 3080 3081 /* 3082 * In SYN_RECEIVED and ESTABLISHED STATES 3083 * enter the CLOSE_WAIT state. 3084 */ 3085 case TCPS_SYN_RECEIVED: 3086 tp->t_starttime = ticks; 3087 /* FALLTHROUGH */ 3088 case TCPS_ESTABLISHED: 3089 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3090 break; 3091 3092 /* 3093 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3094 * enter the CLOSING state. 3095 */ 3096 case TCPS_FIN_WAIT_1: 3097 tcp_state_change(tp, TCPS_CLOSING); 3098 break; 3099 3100 /* 3101 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3102 * starting the time-wait timer, turning off the other 3103 * standard timers. 3104 */ 3105 case TCPS_FIN_WAIT_2: 3106 tcp_twstart(tp); 3107 return; 3108 } 3109 } 3110 #ifdef TCPDEBUG 3111 if (so->so_options & SO_DEBUG) 3112 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3113 &tcp_savetcp, 0); 3114 #endif 3115 TCP_PROBE3(debug__input, tp, th, m); 3116 3117 /* 3118 * Return any desired output. 3119 */ 3120 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3121 (void) tp->t_fb->tfb_tcp_output(tp); 3122 3123 check_delack: 3124 INP_WLOCK_ASSERT(tp->t_inpcb); 3125 3126 if (tp->t_flags & TF_DELACK) { 3127 tp->t_flags &= ~TF_DELACK; 3128 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3129 } 3130 INP_WUNLOCK(tp->t_inpcb); 3131 return; 3132 3133 dropafterack: 3134 /* 3135 * Generate an ACK dropping incoming segment if it occupies 3136 * sequence space, where the ACK reflects our state. 3137 * 3138 * We can now skip the test for the RST flag since all 3139 * paths to this code happen after packets containing 3140 * RST have been dropped. 3141 * 3142 * In the SYN-RECEIVED state, don't send an ACK unless the 3143 * segment we received passes the SYN-RECEIVED ACK test. 3144 * If it fails send a RST. This breaks the loop in the 3145 * "LAND" DoS attack, and also prevents an ACK storm 3146 * between two listening ports that have been sent forged 3147 * SYN segments, each with the source address of the other. 3148 */ 3149 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3150 (SEQ_GT(tp->snd_una, th->th_ack) || 3151 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3152 rstreason = BANDLIM_RST_OPENPORT; 3153 goto dropwithreset; 3154 } 3155 #ifdef TCPDEBUG 3156 if (so->so_options & SO_DEBUG) 3157 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3158 &tcp_savetcp, 0); 3159 #endif 3160 TCP_PROBE3(debug__input, tp, th, m); 3161 tp->t_flags |= TF_ACKNOW; 3162 (void) tp->t_fb->tfb_tcp_output(tp); 3163 INP_WUNLOCK(tp->t_inpcb); 3164 m_freem(m); 3165 return; 3166 3167 dropwithreset: 3168 if (tp != NULL) { 3169 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3170 INP_WUNLOCK(tp->t_inpcb); 3171 } else 3172 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3173 return; 3174 3175 drop: 3176 /* 3177 * Drop space held by incoming segment and return. 3178 */ 3179 #ifdef TCPDEBUG 3180 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3181 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3182 &tcp_savetcp, 0); 3183 #endif 3184 TCP_PROBE3(debug__input, tp, th, m); 3185 if (tp != NULL) 3186 INP_WUNLOCK(tp->t_inpcb); 3187 m_freem(m); 3188 } 3189 3190 /* 3191 * Issue RST and make ACK acceptable to originator of segment. 3192 * The mbuf must still include the original packet header. 3193 * tp may be NULL. 
3194 */ 3195 void 3196 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3197 int tlen, int rstreason) 3198 { 3199 #ifdef INET 3200 struct ip *ip; 3201 #endif 3202 #ifdef INET6 3203 struct ip6_hdr *ip6; 3204 #endif 3205 3206 if (tp != NULL) { 3207 INP_WLOCK_ASSERT(tp->t_inpcb); 3208 } 3209 3210 /* Don't bother if destination was broadcast/multicast. */ 3211 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3212 goto drop; 3213 #ifdef INET6 3214 if (mtod(m, struct ip *)->ip_v == 6) { 3215 ip6 = mtod(m, struct ip6_hdr *); 3216 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3217 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3218 goto drop; 3219 /* IPv6 anycast check is done at tcp6_input() */ 3220 } 3221 #endif 3222 #if defined(INET) && defined(INET6) 3223 else 3224 #endif 3225 #ifdef INET 3226 { 3227 ip = mtod(m, struct ip *); 3228 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3229 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3230 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3231 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3232 goto drop; 3233 } 3234 #endif 3235 3236 /* Perform bandwidth limiting. */ 3237 if (badport_bandlim(rstreason) < 0) 3238 goto drop; 3239 3240 /* tcp_respond consumes the mbuf chain. */ 3241 if (th->th_flags & TH_ACK) { 3242 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3243 th->th_ack, TH_RST); 3244 } else { 3245 if (th->th_flags & TH_SYN) 3246 tlen++; 3247 if (th->th_flags & TH_FIN) 3248 tlen++; 3249 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3250 (tcp_seq)0, TH_RST|TH_ACK); 3251 } 3252 return; 3253 drop: 3254 m_freem(m); 3255 } 3256 3257 /* 3258 * Parse TCP options and place in tcpopt. 3259 */ 3260 void 3261 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3262 { 3263 int opt, optlen; 3264 3265 to->to_flags = 0; 3266 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3267 opt = cp[0]; 3268 if (opt == TCPOPT_EOL) 3269 break; 3270 if (opt == TCPOPT_NOP) 3271 optlen = 1; 3272 else { 3273 if (cnt < 2) 3274 break; 3275 optlen = cp[1]; 3276 if (optlen < 2 || optlen > cnt) 3277 break; 3278 } 3279 switch (opt) { 3280 case TCPOPT_MAXSEG: 3281 if (optlen != TCPOLEN_MAXSEG) 3282 continue; 3283 if (!(flags & TO_SYN)) 3284 continue; 3285 to->to_flags |= TOF_MSS; 3286 bcopy((char *)cp + 2, 3287 (char *)&to->to_mss, sizeof(to->to_mss)); 3288 to->to_mss = ntohs(to->to_mss); 3289 break; 3290 case TCPOPT_WINDOW: 3291 if (optlen != TCPOLEN_WINDOW) 3292 continue; 3293 if (!(flags & TO_SYN)) 3294 continue; 3295 to->to_flags |= TOF_SCALE; 3296 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3297 break; 3298 case TCPOPT_TIMESTAMP: 3299 if (optlen != TCPOLEN_TIMESTAMP) 3300 continue; 3301 to->to_flags |= TOF_TS; 3302 bcopy((char *)cp + 2, 3303 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3304 to->to_tsval = ntohl(to->to_tsval); 3305 bcopy((char *)cp + 6, 3306 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3307 to->to_tsecr = ntohl(to->to_tsecr); 3308 break; 3309 case TCPOPT_SIGNATURE: 3310 /* 3311 * In order to reply to a host which has set the 3312 * TCP_SIGNATURE option in its initial SYN, we have 3313 * to record the fact that the option was observed 3314 * here for the syncache code to perform the correct 3315 * response. 
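/*
 * Sketch of the option walk used by tcp_dooptions() above: options are a
 * TLV stream in which EOL (0) ends parsing, NOP (1) is a single pad byte,
 * and every other kind carries a length byte that must be at least 2 and
 * must not run past the remaining option space.  Malformed lengths
 * terminate parsing rather than being "repaired".  The callback is an
 * illustrative stand-in for the per-option switch above.
 */
#define OPT_EOL 0
#define OPT_NOP 1

static void
walk_tcp_options(const unsigned char *cp, int cnt,
    void (*cb)(int kind, const unsigned char *data, int datalen, void *arg),
    void *arg)
{
        int opt, optlen;

        for (; cnt > 0; cnt -= optlen, cp += optlen) {
                opt = cp[0];
                if (opt == OPT_EOL)
                        break;
                if (opt == OPT_NOP) {
                        optlen = 1;
                        continue;
                }
                if (cnt < 2)
                        break;
                optlen = cp[1];
                if (optlen < 2 || optlen > cnt)
                        break;                  /* malformed: stop parsing */
                cb(opt, cp + 2, optlen - 2, arg);
        }
}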
3316 */ 3317 if (optlen != TCPOLEN_SIGNATURE) 3318 continue; 3319 to->to_flags |= TOF_SIGNATURE; 3320 to->to_signature = cp + 2; 3321 break; 3322 case TCPOPT_SACK_PERMITTED: 3323 if (optlen != TCPOLEN_SACK_PERMITTED) 3324 continue; 3325 if (!(flags & TO_SYN)) 3326 continue; 3327 if (!V_tcp_do_sack) 3328 continue; 3329 to->to_flags |= TOF_SACKPERM; 3330 break; 3331 case TCPOPT_SACK: 3332 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3333 continue; 3334 if (flags & TO_SYN) 3335 continue; 3336 to->to_flags |= TOF_SACK; 3337 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3338 to->to_sacks = cp + 2; 3339 TCPSTAT_INC(tcps_sack_rcv_blocks); 3340 break; 3341 case TCPOPT_FAST_OPEN: 3342 /* 3343 * Cookie length validation is performed by the 3344 * server side cookie checking code or the client 3345 * side cookie cache update code. 3346 */ 3347 if (!(flags & TO_SYN)) 3348 continue; 3349 if (!V_tcp_fastopen_client_enable && 3350 !V_tcp_fastopen_server_enable) 3351 continue; 3352 to->to_flags |= TOF_FASTOPEN; 3353 to->to_tfo_len = optlen - 2; 3354 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3355 break; 3356 default: 3357 continue; 3358 } 3359 } 3360 } 3361 3362 /* 3363 * Pull out of band byte out of a segment so 3364 * it doesn't appear in the user's data queue. 3365 * It is still reflected in the segment length for 3366 * sequencing purposes. 3367 */ 3368 void 3369 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3370 int off) 3371 { 3372 int cnt = off + th->th_urp - 1; 3373 3374 while (cnt >= 0) { 3375 if (m->m_len > cnt) { 3376 char *cp = mtod(m, caddr_t) + cnt; 3377 struct tcpcb *tp = sototcpcb(so); 3378 3379 INP_WLOCK_ASSERT(tp->t_inpcb); 3380 3381 tp->t_iobc = *cp; 3382 tp->t_oobflags |= TCPOOB_HAVEDATA; 3383 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3384 m->m_len--; 3385 if (m->m_flags & M_PKTHDR) 3386 m->m_pkthdr.len--; 3387 return; 3388 } 3389 cnt -= m->m_len; 3390 m = m->m_next; 3391 if (m == NULL) 3392 break; 3393 } 3394 panic("tcp_pulloutofband"); 3395 } 3396 3397 /* 3398 * Collect new round-trip time estimate 3399 * and update averages and current timeout. 3400 */ 3401 void 3402 tcp_xmit_timer(struct tcpcb *tp, int rtt) 3403 { 3404 int delta; 3405 3406 INP_WLOCK_ASSERT(tp->t_inpcb); 3407 3408 TCPSTAT_INC(tcps_rttupdated); 3409 tp->t_rttupdated++; 3410 #ifdef STATS 3411 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, 3412 imax(0, rtt * 1000 / hz)); 3413 #endif 3414 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) { 3415 /* 3416 * srtt is stored as fixed point with 5 bits after the 3417 * binary point (i.e., scaled by 8). The following magic 3418 * is equivalent to the smoothing algorithm in rfc793 with 3419 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3420 * point). Adjust rtt to origin 0. 3421 */ 3422 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3423 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3424 3425 if ((tp->t_srtt += delta) <= 0) 3426 tp->t_srtt = 1; 3427 3428 /* 3429 * We accumulate a smoothed rtt variance (actually, a 3430 * smoothed mean difference), then set the retransmit 3431 * timer to smoothed rtt + 4 times the smoothed variance. 3432 * rttvar is stored as fixed point with 4 bits after the 3433 * binary point (scaled by 16). The following is 3434 * equivalent to rfc793 smoothing with an alpha of .75 3435 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3436 * rfc793's wired-in beta. 
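/*
 * The fixed-point updates in tcp_xmit_timer() implement the classic
 * RFC 793/6298 estimator: srtt is smoothed with gain 1/8, the mean
 * deviation with gain 1/4, and the retransmit timeout is srtt plus four
 * times the deviation, clamped to a minimum and maximum.  A floating-point
 * restatement of the same update, for clarity only (the kernel keeps
 * everything scaled in integer ticks):
 */
static void
rtt_update(double sample, double *srtt, double *rttvar, double *rto,
    double min_rto, double max_rto)
{
        if (*srtt == 0.0) {
                /* First measurement: variance starts at half the sample. */
                *srtt = sample;
                *rttvar = sample / 2.0;
        } else {
                double err = sample - *srtt;

                *srtt += err / 8.0;                             /* alpha = 1/8 */
                *rttvar += ((err < 0 ? -err : err) - *rttvar) / 4.0; /* beta = 1/4 */
        }
        *rto = *srtt + 4.0 * *rttvar;
        if (*rto < min_rto)
                *rto = min_rto;
        if (*rto > max_rto)
                *rto = max_rto;
}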
3437 */ 3438 if (delta < 0) 3439 delta = -delta; 3440 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3441 if ((tp->t_rttvar += delta) <= 0) 3442 tp->t_rttvar = 1; 3443 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3444 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3445 } else { 3446 /* 3447 * No rtt measurement yet - use the unsmoothed rtt. 3448 * Set the variance to half the rtt (so our first 3449 * retransmit happens at 3*rtt). 3450 */ 3451 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3452 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3453 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3454 } 3455 tp->t_rtttime = 0; 3456 tp->t_rxtshift = 0; 3457 3458 /* 3459 * the retransmit should happen at rtt + 4 * rttvar. 3460 * Because of the way we do the smoothing, srtt and rttvar 3461 * will each average +1/2 tick of bias. When we compute 3462 * the retransmit timer, we want 1/2 tick of rounding and 3463 * 1 extra tick because of +-1/2 tick uncertainty in the 3464 * firing of the timer. The bias will give us exactly the 3465 * 1.5 tick we need. But, because the bias is 3466 * statistical, we have to test that we don't drop below 3467 * the minimum feasible timer (which is 2 ticks). 3468 */ 3469 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3470 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3471 3472 /* 3473 * We received an ack for a packet that wasn't retransmitted; 3474 * it is probably safe to discard any error indications we've 3475 * received recently. This isn't quite right, but close enough 3476 * for now (a route might have failed after we sent a segment, 3477 * and the return path might not be symmetrical). 3478 */ 3479 tp->t_softerror = 0; 3480 } 3481 3482 /* 3483 * Determine a reasonable value for maxseg size. 3484 * If the route is known, check route for mtu. 3485 * If none, use an mss that can be handled on the outgoing interface 3486 * without forcing IP to fragment. If no route is found, route has no mtu, 3487 * or the destination isn't local, use a default, hopefully conservative 3488 * size (usually 512 or the default IP max size, but no more than the mtu 3489 * of the interface), as we can't discover anything about intervening 3490 * gateways or networks. We also initialize the congestion/slow start 3491 * window to be a single segment if the destination isn't local. 3492 * While looking at the routing entry, we also initialize other path-dependent 3493 * parameters from pre-set or cached values in the routing entry. 3494 * 3495 * NOTE that resulting t_maxseg doesn't include space for TCP options or 3496 * IP options, e.g. IPSEC data, since length of this data may vary, and 3497 * thus it is calculated for every segment separately in tcp_output(). 3498 * 3499 * NOTE that this routine is only called when we process an incoming 3500 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3501 * settings are handled in tcp_mssopt(). 3502 */ 3503 void 3504 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3505 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3506 { 3507 int mss = 0; 3508 uint32_t maxmtu = 0; 3509 struct inpcb *inp = tp->t_inpcb; 3510 struct hc_metrics_lite metrics; 3511 #ifdef INET6 3512 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3513 size_t min_protoh = isipv6 ? 
3514 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3515 sizeof (struct tcpiphdr); 3516 #else 3517 const size_t min_protoh = sizeof(struct tcpiphdr); 3518 #endif 3519 3520 INP_WLOCK_ASSERT(tp->t_inpcb); 3521 3522 if (mtuoffer != -1) { 3523 KASSERT(offer == -1, ("%s: conflict", __func__)); 3524 offer = mtuoffer - min_protoh; 3525 } 3526 3527 /* Initialize. */ 3528 #ifdef INET6 3529 if (isipv6) { 3530 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3531 tp->t_maxseg = V_tcp_v6mssdflt; 3532 } 3533 #endif 3534 #if defined(INET) && defined(INET6) 3535 else 3536 #endif 3537 #ifdef INET 3538 { 3539 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3540 tp->t_maxseg = V_tcp_mssdflt; 3541 } 3542 #endif 3543 3544 /* 3545 * No route to sender, stay with default mss and return. 3546 */ 3547 if (maxmtu == 0) { 3548 /* 3549 * In case we return early we need to initialize metrics 3550 * to a defined state as tcp_hc_get() would do for us 3551 * if there was no cache hit. 3552 */ 3553 if (metricptr != NULL) 3554 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3555 return; 3556 } 3557 3558 /* What have we got? */ 3559 switch (offer) { 3560 case 0: 3561 /* 3562 * Offer == 0 means that there was no MSS on the SYN 3563 * segment, in this case we use tcp_mssdflt as 3564 * already assigned to t_maxseg above. 3565 */ 3566 offer = tp->t_maxseg; 3567 break; 3568 3569 case -1: 3570 /* 3571 * Offer == -1 means that we didn't receive SYN yet. 3572 */ 3573 /* FALLTHROUGH */ 3574 3575 default: 3576 /* 3577 * Prevent DoS attack with too small MSS. Round up 3578 * to at least minmss. 3579 */ 3580 offer = max(offer, V_tcp_minmss); 3581 } 3582 3583 /* 3584 * rmx information is now retrieved from tcp_hostcache. 3585 */ 3586 tcp_hc_get(&inp->inp_inc, &metrics); 3587 if (metricptr != NULL) 3588 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3589 3590 /* 3591 * If there's a discovered mtu in tcp hostcache, use it. 3592 * Else, use the link mtu. 3593 */ 3594 if (metrics.rmx_mtu) 3595 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3596 else { 3597 #ifdef INET6 3598 if (isipv6) { 3599 mss = maxmtu - min_protoh; 3600 if (!V_path_mtu_discovery && 3601 !in6_localaddr(&inp->in6p_faddr)) 3602 mss = min(mss, V_tcp_v6mssdflt); 3603 } 3604 #endif 3605 #if defined(INET) && defined(INET6) 3606 else 3607 #endif 3608 #ifdef INET 3609 { 3610 mss = maxmtu - min_protoh; 3611 if (!V_path_mtu_discovery && 3612 !in_localaddr(inp->inp_faddr)) 3613 mss = min(mss, V_tcp_mssdflt); 3614 } 3615 #endif 3616 /* 3617 * XXX - The above conditional (mss = maxmtu - min_protoh) 3618 * probably violates the TCP spec. 3619 * The problem is that, since we don't know the 3620 * other end's MSS, we are supposed to use a conservative 3621 * default. But, if we do that, then MTU discovery will 3622 * never actually take place, because the conservative 3623 * default is much less than the MTUs typically seen 3624 * on the Internet today. For the moment, we'll sweep 3625 * this under the carpet. 3626 * 3627 * The conservative default might not actually be a problem 3628 * if the only case this occurs is when sending an initial 3629 * SYN with options and data to a host we've never talked 3630 * to before. Then, they will reply with an MSS value which 3631 * will get recorded and the new parameters should get 3632 * recomputed. For Further Study. 3633 */ 3634 } 3635 mss = min(mss, offer); 3636 3637 /* 3638 * Sanity check: make sure that maxseg will be large 3639 * enough to allow some data on segments even if the 3640 * all the option space is used (40bytes). 

void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	uint32_t bufsize;
	struct inpcb *inp;
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;
	inp = tp->t_inpcb;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat is different from the default (then
	 * it has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}
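
/*
 * Illustrative sketch, not part of the kernel build: the buffer-sizing rule
 * applied above to both so_snd and so_rcv.  A suggested pipe size (or the
 * current high-water mark) is rounded up to a whole number of segments and
 * capped at the administrative maximum; the buffer is only ever grown, never
 * shrunk.  The helper name scaled_bufsize() and the sb_max_cap parameter are
 * local to this sketch.
 */
#if 0
static uint32_t
scaled_bufsize(uint32_t bufsize, u_int mss, uint32_t sb_max_cap)
{
	/* Round up to an integral number of mss-sized units... */
	bufsize = roundup(bufsize, mss);
	/* ...but never beyond the global socket buffer limit. */
	if (bufsize > sb_max_cap)
		bufsize = sb_max_cap;
	return (bufsize);
}
#endif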

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	uint32_t thcmtu = 0;
	uint32_t maxmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	uint32_t ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on the fact that tp->snd_una
	 * has not been updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}

int
tcp_compute_pipe(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una +
	    tp->sackhint.sack_bytes_rexmit -
	    tp->sackhint.sacked_bytes);
}

uint32_t
tcp_compute_initwnd(uint32_t maxseg)
{
	/*
	 * Calculate the Initial Window, also used as Restart Window.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * Support for a user-specified value for initial flight size.
	 */
	if (V_tcp_initcwnd_segments)
		return min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		return min(4 * maxseg, max(2 * maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			return (2 * maxseg);
		else if (maxseg > 1095)
			return (3 * maxseg);
		else
			return (4 * maxseg);
	}
}
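
/*
 * Illustrative worked example, not part of the kernel build: with a common
 * Ethernet-derived maxseg of 1460 bytes, tcp_compute_initwnd() above yields,
 * assuming the noted tunable settings:
 *
 *   V_tcp_initcwnd_segments = 10 (RFC 6928 style):
 *       min(10 * 1460, max(2 * 1460, 10 * 1460)) = 14600 bytes
 *   V_tcp_initcwnd_segments = 0, V_tcp_do_rfc3390 = 1:
 *       min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes
 *   both tunables zero (plain RFC 5681, since 1095 < 1460 <= 2190):
 *       3 * 1460 = 4380 bytes
 *
 * The numbers follow directly from the arithmetic in the function above.
 */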