/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/stats.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

VNET_DEFINE(int, tcp_log_in_vain) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_log_in_vain), 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole	VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_newcwv) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_newcwv), 0,
    "Enable New Congestion Window Validation per RFC7661");

VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc6675_pipe), 0,
    "Use calculated pipe/in-flight bytes per RFC 6675");
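
/*
 * For reference, the knobs above surface under the net.inet.tcp MIB and can
 * be inspected or set from userland with sysctl(3).  A minimal sketch
 * (illustrative only, not compiled into the kernel; error handling omitted):
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int delack, newval = 1;
	size_t len = sizeof(delack);

	/* Read net.inet.tcp.delayed_ack and request it be enabled. */
	if (sysctlbyname("net.inet.tcp.delayed_ack", &delack, &len,
	    &newval, sizeof(newval)) == -1)
		return (1);
	printf("delayed_ack was %d\n", delack);
	return (0);
}
#endif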

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

/*
 * TCP statistics are stored in an array of counter(9)s, whose size matches
 * the size of struct tcpstat.  The TCP running connection count is a regular
 * array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

static void
tcp_vnet_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_init, NULL);

#ifdef VIMAGE
static void
tcp_vnet_uninit(const void *unused)
{

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);
}
VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_uninit, NULL);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating tcpstat.  The first argument is an
 * index into tcpstat treated as an array.
 */
void
kmod_tcpstat_add(int statnum, int val)
{

	counter_u64_add(VNET(tcpstat)[statnum], val);
}

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
    uint16_t type)
{
#ifdef STATS
	int32_t gput;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->nsegs = nsegs;
	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) ||
	    (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) &&
	     (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2))))
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
#ifdef STATS
		stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
		    ((int32_t)tp->snd_cwnd) - tp->snd_wnd);
		if (!IN_RECOVERY(tp->t_flags))
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN,
			    tp->ccv->bytes_this_ack / (tcp_maxseg(tp) * nsegs));
		if ((tp->t_flags & TF_GPUTINPROG) &&
		    SEQ_GEQ(th->th_ack, tp->gput_ack)) {
			/*
			 * Compute goodput in bits per millisecond.
			 */
			gput = (((int64_t)(th->th_ack - tp->gput_seq)) << 3) /
			    max(1, tcp_ts_getticks() - tp->gput_ts);
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
			    gput);
			/*
			 * XXXLAS: This is a temporary hack, and should be
			 * chained off VOI_TCP_GPUT when stats(9) grows an API
			 * to deal with chained VOIs.
			 */
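			/*
			 * For example, 1,000,000 bytes newly acked over a
			 * 100 ms measurement window gives
			 * gput = (1000000 << 3) / 100 = 80000 bits/ms,
			 * roughly 80 Mbit/s; the update below then records
			 * the change relative to the previous sample in
			 * percent (80000 after 64000 yields +25).
			 */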
			if (tp->t_stats_gput_prev > 0)
				stats_voi_update_abs_s32(tp->t_stats,
				    VOI_TCP_GPUT_ND,
				    ((gput - tp->t_stats_gput_prev) * 100) /
				    tp->t_stats_gput_prev);
			tp->t_flags &= ~TF_GPUTINPROG;
			tp->t_stats_gput_prev = gput;
		}
#endif /* STATS */
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    nsegs * V_tcp_abc_l_var * tcp_maxseg(tp));
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
#endif
}

void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else
		tp->snd_cwnd = tcp_compute_initwnd(maxseg);

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	u_int maxseg;

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef STATS
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
#endif

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags) ||
		    /*
		     * Allow ECN reaction on ACK to CWR, if
		     * that data segment was also CE marked.
		     */
		    SEQ_GEQ(th->th_ack, tp->snd_recover)) {
			EXIT_CONGRECOVERY(tp->t_flags);
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max + 1;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		maxseg = tcp_maxseg(tp);
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    maxseg) * maxseg;
		tp->snd_cwnd = maxseg;
		if (tp->t_flags2 & TF2_ECN_PERMIT)
			tp->t_flags2 |= TF2_ECN_SND_CWR;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * the following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxseg) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
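
/*
 * Purely illustrative restatement of DELAY_ACK() as a function, for
 * readability; it is not compiled in and simply mirrors the macro above.
 */
#if 0
static int
delay_ack_allowed(struct tcpcb *tp, int tlen)
{
	if (tcp_timer_active(tp, TT_DELACK))
		return (0);	/* a delayed ACK is already pending */
	if (tp->t_flags & TF_RXWIN0SENT)
		return (0);	/* our last ACK advertised a zero window */
	if (tlen > tp->t_maxseg)
		return (0);	/* larger than MSS, likely LRO aggregated */
	return (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN));
}
#endif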

void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			/* FALLTHROUGH */
		case IPTOS_ECN_ECT1:
			/* FALLTHROUGH */
		case IPTOS_ECN_NOTECT:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW) {
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
			tp->t_flags |= TF_ACKNOW;
		}
	}
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	m = *mp;
	if (m->m_len < *offp + sizeof(struct tcphdr)) {
		m = m_pullup(m, *offp + sizeof(struct tcphdr));
		if (m == NULL) {
			*mp = m;
			TCPSTAT_INC(tcps_rcvshort);
			return (IPPROTO_DONE);
		}
	}

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {

		ifa_free(&ia6->ia_ifa);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		*mp = NULL;
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	*mp = m;
	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
	uint8_t ipttl;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

	NET_EPOCH_ASSERT();

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ?
	    1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		iptos = ip->ip_tos;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			ipttl = ip->ip_ttl;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(len);
			/* Reset TOS bits */
			ip->ip_tos = iptos;
			/* Re-initialization for later version check */
			ip->ip_ttl = ipttl;
			ip->ip_v = IPVERSION;
			ip->ip_hl = off0 >> 2;
		}

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
	}
#endif /* INET */

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
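	/*
	 * th_off counts 32-bit words, so the shift below converts it to a
	 * byte count.  For example, th_off == 5 is a bare 20-byte header
	 * (no options) and th_off == 8 is a 32-byte header carrying 12
	 * bytes of options; anything below 5 or beyond the segment length
	 * is rejected.
	 */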
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			if (m->m_len < off0 + off) {
				m = m_pullup(m, off0 + off);
				if (m == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
			}
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ?
			    ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    V_tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	/*
	 * While waiting for inp lock during the lookup, another thread
	 * can have dropped the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 */
	if (inp->inp_flags & INP_DROPPED) {
		INP_WUNLOCK(inp);
		inp = NULL;
		goto findpcb;
	}
	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    ((inp->inp_socket == NULL) ||
	    (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6) &&
	    IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
		goto dropunlock;
	}
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4) &&
	    IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
		goto dropunlock;
	}
#endif /* INET */
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6) {
			if (inp->inp_ip_minttl > ip6->ip6_hlim)
				goto dropunlock;
		} else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
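		/*
		 * A non-zero return here means the TIME_WAIT connection was
		 * discarded in favour of a legitimate new connection
		 * attempt, so loop back and redo the pcb lookup; otherwise
		 * the segment has been fully handled.
		 */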
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		return (IPPROTO_DONE);
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN),
	    ("%s: so accepting but tp %p not listening", __func__, tp));
	if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) {
		struct in_conninfo inc;

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
				inc.inc_flags |= INC_IPV6MINMTU;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {

			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			rstreason = syncache_expand(&inc, &to, th, &so, m);
			if (rstreason < 0) {
				/*
				 * A failing TCP MD5 signature comparison
				 * must result in the segment being dropped
				 * and must not produce any response back
				 * to the sender.
				 */
				goto dropunlock;
			} else if (rstreason == 0) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
tfo_socket_result:
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			TCP_PROBE5(receive, NULL, tp, m, tp, th);
			tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen,
			    tlen, iptos);
			return (IPPROTO_DONE);
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th, m);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to deprecated "
					    "IPv6 address rejected\n",
					    s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "Connection attempt from broad- or multicast "
				    "link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to/from self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to multicast "
					    "address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to broad- "
					    "or multicast address ignored\n",
					    s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		TCP_PROBE3(debug__input, tp, th, m);
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL, iptos))
			goto tfo_socket_result;

		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition allowing a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (tp->t_flags & TF_SIGNATURE) {
		tcp_dooptions(&to, optp, optlen, thflags);
		if ((to.to_flags & TOF_SIGNATURE) == 0) {
			TCPSTAT_INC(tcps_sig_err_nosigopt);
			goto dropunlock;
		}
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_INPUT(m, th, to.to_signature) != 0)
			goto dropunlock;
	}
#endif
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * Automatic sizing of receive socket buffer.  Often the send
 * buffer size is not optimally adjusted to the actual network
 * conditions at hand (delay bandwidth product).  Setting the
 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (e.g. trans-continental/oceanic links).
 *
 * On the receive side the socket buffer memory is only rarely
 * used to any significant extent.  This allows us to be much
 * more aggressive in scaling the receive socket buffer.  For
 * the case that the buffer space is actually used to a large
 * extent and we run out of kernel memory we can simply drop
 * the new segments; TCP on the sender will just retransmit it
 * later.  Setting the buffer size too big may only consume too
 * much kernel memory if the application doesn't read() from
 * the socket or packet loss or reordering makes use of the
 * reassembly queue.
 *
 * The criteria to step up the receive buffer one notch are:
 *  1. Application has not set receive buffer size with
 *     SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
 *  2. the number of bytes received during 1/2 of an sRTT
 *     is at least 3/8 of the current socket buffer size.
 *  3. receive buffer size has not hit maximal automatic size;
 *
 * If all of the criteria are met we increase the socket buffer
 * by a 1/2 (bounded by the max).  This allows us to keep ahead
 * of slow-start but also makes it so our peer never gets limited
 * by our rwnd which we then open up causing a burst.
 *
 * This algorithm does two steps per RTT at most and only if
 * we receive a bulk stream w/o packet losses or reorderings.
 * Shrinking the buffer during idle times is not necessary as
 * it doesn't consume any memory when idle.
 *
 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
	int newsize = 0;

	if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
	    tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
	    TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
	    ((tp->t_srtt >> TCP_RTT_SHIFT) / 2)) {
		if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2) / 4 * 3) &&
		    so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
			newsize = min((so->so_rcv.sb_hiwat +
			    (so->so_rcv.sb_hiwat / 2)), V_tcp_autorcvbuf_max);
		}
		TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

		/* Start over with next RTT. */
		tp->rfbuf_ts = 0;
		tp->rfbuf_cnt = 0;
	} else {
		tp->rfbuf_cnt += tlen;	/* add up */
	}
	return (newsize);
}
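
/*
 * Worked example of the step-up rule above: with sb_hiwat = 65536 the
 * threshold in criterion 2 is (65536 / 2) / 4 * 3 = 24576 bytes received
 * within half an sRTT; once it is met the buffer grows by half to
 * min(65536 + 32768, net.inet.tcp.recvbuf_max) = 98304 bytes (with the
 * default 2 MB cap).
 */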

void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos)
{
	int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win, incforsyn = 0;
	uint32_t tiwin;
	uint16_t nsegs;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;
	nsegs = max(1, m->m_pkthdr.lro_nsegs);

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
	    tlen, NULL, true);

	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: "
			    "SYN|FIN segment ignored (based on "
			    "sysctl setting)\n", s, __func__);
			free(s, M_TCPLOG);
		}
		goto drop;
	}

	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
#endif

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags2 & TF2_ECN_PERMIT) {
		if (thflags & TH_CWR) {
			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
			tp->t_flags |= TF_ACKNOW;
		}
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags2 |= TF2_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ?
	    TO_SYN : 0);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if ((tp->t_flags & TF_SIGNATURE) != 0 &&
	    (to.to_flags & TOF_SIGNATURE) == 0) {
		TCPSTAT_INC(tcps_sig_err_sigopt);
		/* XXX: should drop? */
	}
#endif
	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
		else if (tp->t_flags & TF_PREVVALID &&
		    tp->t_badrxtwin != 0 && SEQ_LT(to.to_tsecr, tp->t_badrxtwin))
			cc_cong_signal(tp, th, CC_RTO_ERR);
	}
	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		/* Handle parallel SYN for ECN */
		if (!(thflags & TH_ACK) &&
		    ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) &&
		    ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) {
			tp->t_flags2 |= TF2_ECN_PERMIT;
			tp->t_flags2 |= TF2_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_shs);
		}
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		} else
			tp->t_flags &= ~TF_REQ_SCALE;
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if ((to.to_flags & TOF_TS) &&
		    (tp->t_flags & TF_REQ_TSTMP)) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		} else
			tp->t_flags &= ~TF_REQ_TSTMP;
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
		if (IS_FASTOPEN(tp->t_flags)) {
			if (to.to_flags & TOF_FASTOPEN) {
				uint16_t mss;

				if (to.to_flags & TOF_MSS)
					mss = to.to_mss;
				else
					if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
						mss = TCP6_MSS;
					else
						mss = TCP_MSS;
				tcp_fastopen_update_cache(tp, mss,
				    to.to_tfo_len, to.to_tfo_cookie);
			} else
				tcp_fastopen_disable_path(tp);
		}
	}

	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.
	 * If the packet has no control flags, is in-sequence, the window
	 * didn't change and we're not retransmitting, it's a candidate.
	 * If the length is zero and the ack moved forward, we're the
	 * sender side of the xfer.  Just free the data acked & wake any
	 * higher level process that was blocked waiting for space.  If
	 * the length is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order (the reassembly
	 * queue is empty), add the data to the socket buffer and note
	 * that we need a delayed ack.  Make sure that the hidden
	 * state-flags are also off.  Since we check for TCPS_ESTABLISHED
	 * first, it can only be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    SEGQ_EMPTY(tp) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent))) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery without
				 * timestamps.
				 */
				if ((to.to_flags & TOF_TS) == 0 &&
				    tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					uint32_t t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				acked = BYTES_THIS_ACK(tp, th);

#ifdef TCP_HHOOK
				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);
#endif

				TCPSTAT_ADD(tcps_rcvackpack, nsegs);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
1792 */ 1793 cc_ack_received(tp, th, nsegs, CC_ACK); 1794 1795 tp->snd_una = th->th_ack; 1796 /* 1797 * Pull snd_wl2 up to prevent seq wrap relative 1798 * to th_ack. 1799 */ 1800 tp->snd_wl2 = th->th_ack; 1801 tp->t_dupacks = 0; 1802 m_freem(m); 1803 1804 /* 1805 * If all outstanding data are acked, stop 1806 * retransmit timer, otherwise restart timer 1807 * using current (possibly backed-off) value. 1808 * If process is waiting for space, 1809 * wakeup/selwakeup/signal. If data 1810 * are ready to send, let tcp_output 1811 * decide between more output or persist. 1812 */ 1813 #ifdef TCPDEBUG 1814 if (so->so_options & SO_DEBUG) 1815 tcp_trace(TA_INPUT, ostate, tp, 1816 (void *)tcp_saveipgen, 1817 &tcp_savetcp, 0); 1818 #endif 1819 TCP_PROBE3(debug__input, tp, th, m); 1820 if (tp->snd_una == tp->snd_max) 1821 tcp_timer_activate(tp, TT_REXMT, 0); 1822 else if (!tcp_timer_active(tp, TT_PERSIST)) 1823 tcp_timer_activate(tp, TT_REXMT, 1824 tp->t_rxtcur); 1825 sowwakeup(so); 1826 if (sbavail(&so->so_snd)) 1827 (void) tp->t_fb->tfb_tcp_output(tp); 1828 goto check_delack; 1829 } 1830 } else if (th->th_ack == tp->snd_una && 1831 tlen <= sbspace(&so->so_rcv)) { 1832 int newsize = 0; /* automatic sockbuf scaling */ 1833 1834 /* 1835 * This is a pure, in-sequence data packet with 1836 * nothing on the reassembly queue and we have enough 1837 * buffer space to take it. 1838 */ 1839 /* Clean receiver SACK report if present */ 1840 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1841 tcp_clean_sackreport(tp); 1842 TCPSTAT_INC(tcps_preddat); 1843 tp->rcv_nxt += tlen; 1844 if (tlen && 1845 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 1846 (tp->t_fbyte_in == 0)) { 1847 tp->t_fbyte_in = ticks; 1848 if (tp->t_fbyte_in == 0) 1849 tp->t_fbyte_in = 1; 1850 if (tp->t_fbyte_out && tp->t_fbyte_in) 1851 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 1852 } 1853 /* 1854 * Pull snd_wl1 up to prevent seq wrap relative to 1855 * th_seq. 1856 */ 1857 tp->snd_wl1 = th->th_seq; 1858 /* 1859 * Pull rcv_up up to prevent seq wrap relative to 1860 * rcv_nxt. 1861 */ 1862 tp->rcv_up = tp->rcv_nxt; 1863 TCPSTAT_ADD(tcps_rcvpack, nsegs); 1864 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1865 #ifdef TCPDEBUG 1866 if (so->so_options & SO_DEBUG) 1867 tcp_trace(TA_INPUT, ostate, tp, 1868 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1869 #endif 1870 TCP_PROBE3(debug__input, tp, th, m); 1871 1872 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 1873 1874 /* Add data to socket buffer. */ 1875 SOCKBUF_LOCK(&so->so_rcv); 1876 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1877 m_freem(m); 1878 } else { 1879 /* 1880 * Set new socket buffer size. 1881 * Give up when limit is reached. 1882 */ 1883 if (newsize) 1884 if (!sbreserve_locked(&so->so_rcv, 1885 newsize, so, NULL)) 1886 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1887 m_adj(m, drop_hdrlen); /* delayed header drop */ 1888 sbappendstream_locked(&so->so_rcv, m, 0); 1889 } 1890 /* NB: sorwakeup_locked() does an implicit unlock. */ 1891 sorwakeup_locked(so); 1892 if (DELAY_ACK(tp, tlen)) { 1893 tp->t_flags |= TF_DELACK; 1894 } else { 1895 tp->t_flags |= TF_ACKNOW; 1896 tp->t_fb->tfb_tcp_output(tp); 1897 } 1898 goto check_delack; 1899 } 1900 } 1901 1902 /* 1903 * Calculate amount of space in receive window, 1904 * and then do TCP input processing. 1905 * Receive window is amount of space in rcv queue, 1906 * but not less than advertised window. 
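 *
 * Roughly, the computation below amounts to
 *	rcv_wnd = max(sbspace(&so->so_rcv), rcv_adv - rcv_nxt),
 * so the window offered to the peer never shrinks below what
 * has already been advertised.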
1907 */ 1908 win = sbspace(&so->so_rcv); 1909 if (win < 0) 1910 win = 0; 1911 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1912 1913 switch (tp->t_state) { 1914 1915 /* 1916 * If the state is SYN_RECEIVED: 1917 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1918 */ 1919 case TCPS_SYN_RECEIVED: 1920 if ((thflags & TH_ACK) && 1921 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1922 SEQ_GT(th->th_ack, tp->snd_max))) { 1923 rstreason = BANDLIM_RST_OPENPORT; 1924 goto dropwithreset; 1925 } 1926 if (IS_FASTOPEN(tp->t_flags)) { 1927 /* 1928 * When a TFO connection is in SYN_RECEIVED, the 1929 * only valid packets are the initial SYN, a 1930 * retransmit/copy of the initial SYN (possibly with 1931 * a subset of the original data), a valid ACK, a 1932 * FIN, or a RST. 1933 */ 1934 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1935 rstreason = BANDLIM_RST_OPENPORT; 1936 goto dropwithreset; 1937 } else if (thflags & TH_SYN) { 1938 /* non-initial SYN is ignored */ 1939 if ((tcp_timer_active(tp, TT_DELACK) || 1940 tcp_timer_active(tp, TT_REXMT))) 1941 goto drop; 1942 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1943 goto drop; 1944 } 1945 } 1946 break; 1947 1948 /* 1949 * If the state is SYN_SENT: 1950 * if seg contains a RST with valid ACK (SEQ.ACK has already 1951 * been verified), then drop the connection. 1952 * if seg contains a RST without an ACK, drop the seg. 1953 * if seg does not contain SYN, then drop the seg. 1954 * Otherwise this is an acceptable SYN segment 1955 * initialize tp->rcv_nxt and tp->irs 1956 * if seg contains ack then advance tp->snd_una 1957 * if seg contains an ECE and ECN support is enabled, the stream 1958 * is ECN capable. 1959 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1960 * arrange for segment to be acked (eventually) 1961 * continue processing rest of data/controls, beginning with URG 1962 */ 1963 case TCPS_SYN_SENT: 1964 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1965 TCP_PROBE5(connect__refused, NULL, tp, 1966 m, tp, th); 1967 tp = tcp_drop(tp, ECONNREFUSED); 1968 } 1969 if (thflags & TH_RST) 1970 goto drop; 1971 if (!(thflags & TH_SYN)) 1972 goto drop; 1973 1974 tp->irs = th->th_seq; 1975 tcp_rcvseqinit(tp); 1976 if (thflags & TH_ACK) { 1977 int tfo_partial_ack = 0; 1978 1979 TCPSTAT_INC(tcps_connects); 1980 soisconnected(so); 1981 #ifdef MAC 1982 mac_socketpeer_set_from_mbuf(m, so); 1983 #endif 1984 /* Do window scaling on this connection? */ 1985 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1986 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1987 tp->rcv_scale = tp->request_r_scale; 1988 } 1989 tp->rcv_adv += min(tp->rcv_wnd, 1990 TCP_MAXWIN << tp->rcv_scale); 1991 tp->snd_una++; /* SYN is acked */ 1992 /* 1993 * If not all the data that was sent in the TFO SYN 1994 * has been acked, resend the remainder right away. 1995 */ 1996 if (IS_FASTOPEN(tp->t_flags) && 1997 (tp->snd_una != tp->snd_max)) { 1998 tp->snd_nxt = th->th_ack; 1999 tfo_partial_ack = 1; 2000 } 2001 /* 2002 * If there's data, delay ACK; if there's also a FIN 2003 * ACKNOW will be turned on later. 2004 */ 2005 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack) 2006 tcp_timer_activate(tp, TT_DELACK, 2007 tcp_delacktime); 2008 else 2009 tp->t_flags |= TF_ACKNOW; 2010 2011 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 2012 (V_tcp_do_ecn == 1)) { 2013 tp->t_flags2 |= TF2_ECN_PERMIT; 2014 TCPSTAT_INC(tcps_ecn_shs); 2015 } 2016 2017 /* 2018 * Received <SYN,ACK> in SYN_SENT[*] state. 
2019 * Transitions: 2020 * SYN_SENT --> ESTABLISHED 2021 * SYN_SENT* --> FIN_WAIT_1 2022 */ 2023 tp->t_starttime = ticks; 2024 if (tp->t_flags & TF_NEEDFIN) { 2025 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2026 tp->t_flags &= ~TF_NEEDFIN; 2027 thflags &= ~TH_SYN; 2028 } else { 2029 tcp_state_change(tp, TCPS_ESTABLISHED); 2030 TCP_PROBE5(connect__established, NULL, tp, 2031 m, tp, th); 2032 cc_conn_init(tp); 2033 tcp_timer_activate(tp, TT_KEEP, 2034 TP_KEEPIDLE(tp)); 2035 } 2036 } else { 2037 /* 2038 * Received initial SYN in SYN-SENT[*] state => 2039 * simultaneous open. 2040 * If it succeeds, connection is half-synchronized. 2041 * Otherwise, do 3-way handshake: 2042 * SYN-SENT -> SYN-RECEIVED 2043 * SYN-SENT* -> SYN-RECEIVED* 2044 */ 2045 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 2046 tcp_timer_activate(tp, TT_REXMT, 0); 2047 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2048 } 2049 2050 INP_WLOCK_ASSERT(tp->t_inpcb); 2051 2052 /* 2053 * Advance th->th_seq to correspond to first data byte. 2054 * If data, trim to stay within window, 2055 * dropping FIN if necessary. 2056 */ 2057 th->th_seq++; 2058 if (tlen > tp->rcv_wnd) { 2059 todrop = tlen - tp->rcv_wnd; 2060 m_adj(m, -todrop); 2061 tlen = tp->rcv_wnd; 2062 thflags &= ~TH_FIN; 2063 TCPSTAT_INC(tcps_rcvpackafterwin); 2064 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2065 } 2066 tp->snd_wl1 = th->th_seq - 1; 2067 tp->rcv_up = th->th_seq; 2068 /* 2069 * Client side of transaction: already sent SYN and data. 2070 * If the remote host used T/TCP to validate the SYN, 2071 * our data will be ACK'd; if so, enter normal data segment 2072 * processing in the middle of step 5, ack processing. 2073 * Otherwise, goto step 6. 2074 */ 2075 if (thflags & TH_ACK) 2076 goto process_ACK; 2077 2078 goto step6; 2079 2080 /* 2081 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2082 * do normal processing. 2083 * 2084 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2085 */ 2086 case TCPS_LAST_ACK: 2087 case TCPS_CLOSING: 2088 break; /* continue normal processing */ 2089 } 2090 2091 /* 2092 * States other than LISTEN or SYN_SENT. 2093 * First check the RST flag and sequence number since reset segments 2094 * are exempt from the timestamp and connection count tests. This 2095 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2096 * below which allowed reset segments in half the sequence space 2097 * to fall through and be processed (which gives forged reset 2098 * segments with a random sequence number a 50 percent chance of 2099 * killing a connection). 2100 * Then check timestamp, if present. 2101 * Then check the connection count, if present. 2102 * Then check that at least some bytes of segment are within 2103 * receive window. If segment begins before rcv_nxt, 2104 * drop leading data (and SYN); if nothing left, just ack. 2105 */ 2106 if (thflags & TH_RST) { 2107 /* 2108 * RFC5961 Section 3.2 2109 * 2110 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2111 * - If RST is in window, we send challenge ACK. 2112 * 2113 * Note: to take into account delayed ACKs, we should 2114 * test against last_ack_sent instead of rcv_nxt. 2115 * Note 2: we handle special case of closed window, not 2116 * covered by the RFC.
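 *
 * Sketch of the acceptance test applied below:
 *	last_ack_sent <= SEG.SEQ < last_ack_sent + rcv_wnd,
 * or SEG.SEQ == last_ack_sent when the window is zero.  Only an
 * exact match on last_ack_sent (or the V_tcp_insecure_rst knob)
 * actually tears the connection down; anything else inside the
 * window draws a challenge ACK.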
2117 */ 2118 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2119 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2120 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2121 2122 KASSERT(tp->t_state != TCPS_SYN_SENT, 2123 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2124 __func__, th, tp)); 2125 2126 if (V_tcp_insecure_rst || 2127 tp->last_ack_sent == th->th_seq) { 2128 TCPSTAT_INC(tcps_drops); 2129 /* Drop the connection. */ 2130 switch (tp->t_state) { 2131 case TCPS_SYN_RECEIVED: 2132 so->so_error = ECONNREFUSED; 2133 goto close; 2134 case TCPS_ESTABLISHED: 2135 case TCPS_FIN_WAIT_1: 2136 case TCPS_FIN_WAIT_2: 2137 case TCPS_CLOSE_WAIT: 2138 case TCPS_CLOSING: 2139 case TCPS_LAST_ACK: 2140 so->so_error = ECONNRESET; 2141 close: 2142 /* FALLTHROUGH */ 2143 default: 2144 tp = tcp_close(tp); 2145 } 2146 } else { 2147 TCPSTAT_INC(tcps_badrst); 2148 /* Send challenge ACK. */ 2149 tcp_respond(tp, mtod(m, void *), th, m, 2150 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2151 tp->last_ack_sent = tp->rcv_nxt; 2152 m = NULL; 2153 } 2154 } 2155 goto drop; 2156 } 2157 2158 /* 2159 * RFC5961 Section 4.2 2160 * Send challenge ACK for any SYN in synchronized state. 2161 */ 2162 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT && 2163 tp->t_state != TCPS_SYN_RECEIVED) { 2164 TCPSTAT_INC(tcps_badsyn); 2165 if (V_tcp_insecure_syn && 2166 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2167 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2168 tp = tcp_drop(tp, ECONNRESET); 2169 rstreason = BANDLIM_UNLIMITED; 2170 } else { 2171 /* Send challenge ACK. */ 2172 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2173 tp->snd_nxt, TH_ACK); 2174 tp->last_ack_sent = tp->rcv_nxt; 2175 m = NULL; 2176 } 2177 goto drop; 2178 } 2179 2180 /* 2181 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2182 * and it's less than ts_recent, drop it. 2183 */ 2184 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2185 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2186 2187 /* Check to see if ts_recent is over 24 days old. */ 2188 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2189 /* 2190 * Invalidate ts_recent. If this segment updates 2191 * ts_recent, the age will be reset later and ts_recent 2192 * will get a valid value. If it does not, setting 2193 * ts_recent to zero will at least satisfy the 2194 * requirement that zero be placed in the timestamp 2195 * echo reply when ts_recent isn't valid. The 2196 * age isn't reset until we get a valid ts_recent 2197 * because we don't want out-of-order segments to be 2198 * dropped when ts_recent is old. 2199 */ 2200 tp->ts_recent = 0; 2201 } else { 2202 TCPSTAT_INC(tcps_rcvduppack); 2203 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2204 TCPSTAT_INC(tcps_pawsdrop); 2205 if (tlen) 2206 goto dropafterack; 2207 goto drop; 2208 } 2209 } 2210 2211 /* 2212 * In the SYN-RECEIVED state, validate that the packet belongs to 2213 * this connection before trimming the data to fit the receive 2214 * window. Check the sequence number versus IRS since we know 2215 * the sequence numbers haven't wrapped. This is a partial fix 2216 * for the "LAND" DoS attack. 
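 *
 * (A LAND segment spoofs our own address and port as its
 * source, so the connection is effectively looped back on
 * itself; rejecting SYN_RECEIVED segments whose sequence number
 * precedes the recorded IRS, as done below, keeps such
 * forgeries from being processed further.)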
2217 */ 2218 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2219 rstreason = BANDLIM_RST_OPENPORT; 2220 goto dropwithreset; 2221 } 2222 2223 todrop = tp->rcv_nxt - th->th_seq; 2224 if (todrop > 0) { 2225 if (thflags & TH_SYN) { 2226 thflags &= ~TH_SYN; 2227 th->th_seq++; 2228 if (th->th_urp > 1) 2229 th->th_urp--; 2230 else 2231 thflags &= ~TH_URG; 2232 todrop--; 2233 } 2234 /* 2235 * Following if statement from Stevens, vol. 2, p. 960. 2236 */ 2237 if (todrop > tlen 2238 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2239 /* 2240 * Any valid FIN must be to the left of the window. 2241 * At this point the FIN must be a duplicate or out 2242 * of sequence; drop it. 2243 */ 2244 thflags &= ~TH_FIN; 2245 2246 /* 2247 * Send an ACK to resynchronize and drop any data. 2248 * But keep on processing for RST or ACK. 2249 */ 2250 tp->t_flags |= TF_ACKNOW; 2251 todrop = tlen; 2252 TCPSTAT_INC(tcps_rcvduppack); 2253 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2254 } else { 2255 TCPSTAT_INC(tcps_rcvpartduppack); 2256 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2257 } 2258 /* 2259 * DSACK - add SACK block for dropped range 2260 */ 2261 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) { 2262 tcp_update_sack_list(tp, th->th_seq, 2263 th->th_seq + todrop); 2264 /* 2265 * ACK now, as the next in-sequence segment 2266 * will clear the DSACK block again 2267 */ 2268 tp->t_flags |= TF_ACKNOW; 2269 } 2270 drop_hdrlen += todrop; /* drop from the top afterwards */ 2271 th->th_seq += todrop; 2272 tlen -= todrop; 2273 if (th->th_urp > todrop) 2274 th->th_urp -= todrop; 2275 else { 2276 thflags &= ~TH_URG; 2277 th->th_urp = 0; 2278 } 2279 } 2280 2281 /* 2282 * If new data are received on a connection after the 2283 * user processes are gone, then RST the other end. 2284 */ 2285 if ((so->so_state & SS_NOFDREF) && 2286 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2287 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2288 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2289 "after socket was closed, " 2290 "sending RST and removing tcpcb\n", 2291 s, __func__, tcpstates[tp->t_state], tlen); 2292 free(s, M_TCPLOG); 2293 } 2294 tp = tcp_close(tp); 2295 TCPSTAT_INC(tcps_rcvafterclose); 2296 rstreason = BANDLIM_UNLIMITED; 2297 goto dropwithreset; 2298 } 2299 2300 /* 2301 * If segment ends after window, drop trailing data 2302 * (and PUSH and FIN); if nothing left, just ACK. 2303 */ 2304 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2305 if (todrop > 0) { 2306 TCPSTAT_INC(tcps_rcvpackafterwin); 2307 if (todrop >= tlen) { 2308 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2309 /* 2310 * If window is closed can only take segments at 2311 * window edge, and have to drop data and PUSH from 2312 * incoming segments. Continue processing, but 2313 * remember to ack. Otherwise, drop segment 2314 * and ack. 2315 */ 2316 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2317 tp->t_flags |= TF_ACKNOW; 2318 TCPSTAT_INC(tcps_rcvwinprobe); 2319 } else 2320 goto dropafterack; 2321 } else 2322 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2323 m_adj(m, -todrop); 2324 tlen -= todrop; 2325 thflags &= ~(TH_PUSH|TH_FIN); 2326 } 2327 2328 /* 2329 * If last ACK falls within this segment's sequence numbers, 2330 * record its timestamp. 2331 * NOTE: 2332 * 1) That the test incorporates suggestions from the latest 2333 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 
2334 * 2) That updating only on newer timestamps interferes with 2335 * our earlier PAWS tests, so this check should be solely 2336 * predicated on the sequence space of this segment. 2337 * 3) That we modify the segment boundary check to be 2338 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2339 * instead of RFC1323's 2340 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2341 * This modified check allows us to overcome RFC1323's 2342 * limitations as described in Stevens TCP/IP Illustrated 2343 * Vol. 2 p.869. In such cases, we can still calculate the 2344 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2345 */ 2346 if ((to.to_flags & TOF_TS) != 0 && 2347 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2348 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2349 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2350 tp->ts_recent_age = tcp_ts_getticks(); 2351 tp->ts_recent = to.to_tsval; 2352 } 2353 2354 /* 2355 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2356 * flag is on (half-synchronized state), then queue data for 2357 * later processing; else drop segment and return. 2358 */ 2359 if ((thflags & TH_ACK) == 0) { 2360 if (tp->t_state == TCPS_SYN_RECEIVED || 2361 (tp->t_flags & TF_NEEDSYN)) { 2362 if (tp->t_state == TCPS_SYN_RECEIVED && 2363 IS_FASTOPEN(tp->t_flags)) { 2364 tp->snd_wnd = tiwin; 2365 cc_conn_init(tp); 2366 } 2367 goto step6; 2368 } else if (tp->t_flags & TF_ACKNOW) 2369 goto dropafterack; 2370 else 2371 goto drop; 2372 } 2373 2374 /* 2375 * Ack processing. 2376 */ 2377 switch (tp->t_state) { 2378 2379 /* 2380 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2381 * ESTABLISHED state and continue processing. 2382 * The ACK was checked above. 2383 */ 2384 case TCPS_SYN_RECEIVED: 2385 2386 TCPSTAT_INC(tcps_connects); 2387 soisconnected(so); 2388 /* Do window scaling? */ 2389 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2390 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2391 tp->rcv_scale = tp->request_r_scale; 2392 } 2393 tp->snd_wnd = tiwin; 2394 /* 2395 * Make transitions: 2396 * SYN-RECEIVED -> ESTABLISHED 2397 * SYN-RECEIVED* -> FIN-WAIT-1 2398 */ 2399 tp->t_starttime = ticks; 2400 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 2401 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2402 tp->t_tfo_pending = NULL; 2403 } 2404 if (tp->t_flags & TF_NEEDFIN) { 2405 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2406 tp->t_flags &= ~TF_NEEDFIN; 2407 } else { 2408 tcp_state_change(tp, TCPS_ESTABLISHED); 2409 TCP_PROBE5(accept__established, NULL, tp, 2410 m, tp, th); 2411 /* 2412 * TFO connections call cc_conn_init() during SYN 2413 * processing. Calling it again here for such 2414 * connections is not harmless as it would undo the 2415 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2416 * is retransmitted. 2417 */ 2418 if (!IS_FASTOPEN(tp->t_flags)) 2419 cc_conn_init(tp); 2420 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2421 } 2422 /* 2423 * Account for the ACK of our SYN prior to 2424 * regular ACK processing below, except for 2425 * simultaneous SYN, which is handled later. 2426 */ 2427 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 2428 incforsyn = 1; 2429 /* 2430 * If segment contains data or ACK, will call tcp_reass() 2431 * later; if not, do so now to pass queued data to user. 2432 */ 2433 if (tlen == 0 && (thflags & TH_FIN) == 0) 2434 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 2435 (struct mbuf *)0); 2436 tp->snd_wl1 = th->th_seq - 1; 2437 /* FALLTHROUGH */ 2438 2439 /* 2440 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2441 * ACKs. 
If the ack is in the range 2442 * tp->snd_una < th->th_ack <= tp->snd_max 2443 * then advance tp->snd_una to th->th_ack and drop 2444 * data from the retransmission queue. If this ACK reflects 2445 * more up to date window information we update our window information. 2446 */ 2447 case TCPS_ESTABLISHED: 2448 case TCPS_FIN_WAIT_1: 2449 case TCPS_FIN_WAIT_2: 2450 case TCPS_CLOSE_WAIT: 2451 case TCPS_CLOSING: 2452 case TCPS_LAST_ACK: 2453 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2454 TCPSTAT_INC(tcps_rcvacktoomuch); 2455 goto dropafterack; 2456 } 2457 if ((tp->t_flags & TF_SACK_PERMIT) && 2458 ((to.to_flags & TOF_SACK) || 2459 !TAILQ_EMPTY(&tp->snd_holes))) 2460 sack_changed = tcp_sack_doack(tp, &to, th->th_ack); 2461 else 2462 /* 2463 * Reset the value so that previous (valid) value 2464 * from the last ack with SACK doesn't get used. 2465 */ 2466 tp->sackhint.sacked_bytes = 0; 2467 2468 #ifdef TCP_HHOOK 2469 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2470 hhook_run_tcp_est_in(tp, th, &to); 2471 #endif 2472 2473 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2474 u_int maxseg; 2475 2476 maxseg = tcp_maxseg(tp); 2477 if (tlen == 0 && 2478 (tiwin == tp->snd_wnd || 2479 (tp->t_flags & TF_SACK_PERMIT))) { 2480 /* 2481 * If this is the first time we've seen a 2482 * FIN from the remote, this is not a 2483 * duplicate and it needs to be processed 2484 * normally. This happens during a 2485 * simultaneous close. 2486 */ 2487 if ((thflags & TH_FIN) && 2488 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2489 tp->t_dupacks = 0; 2490 break; 2491 } 2492 TCPSTAT_INC(tcps_rcvdupack); 2493 /* 2494 * If we have outstanding data (other than 2495 * a window probe), this is a completely 2496 * duplicate ack (ie, window info didn't 2497 * change and FIN isn't set), 2498 * the ack is the biggest we've 2499 * seen and we've seen exactly our rexmt 2500 * threshold of them, assume a packet 2501 * has been dropped and retransmit it. 2502 * Kludge snd_nxt & the congestion 2503 * window so we send only this one 2504 * packet. 2505 * 2506 * We know we're losing at the current 2507 * window size so do congestion avoidance 2508 * (set ssthresh to half the current window 2509 * and pull our congestion window back to 2510 * the new ssthresh). 2511 * 2512 * Dup acks mean that packets have left the 2513 * network (they're now cached at the receiver) 2514 * so bump cwnd by the amount in the receiver 2515 * to keep a constant cwnd packets in the 2516 * network. 2517 * 2518 * When using TCP ECN, notify the peer that 2519 * we reduced the cwnd. 2520 */ 2521 /* 2522 * Following 2 kinds of acks should not affect 2523 * dupack counting: 2524 * 1) Old acks 2525 * 2) Acks with SACK but without any new SACK 2526 * information in them. These could result from 2527 * any anomaly in the network like a switch 2528 * duplicating packets or a possible DoS attack. 2529 */ 2530 if (th->th_ack != tp->snd_una || 2531 ((tp->t_flags & TF_SACK_PERMIT) && 2532 !sack_changed)) 2533 break; 2534 else if (!tcp_timer_active(tp, TT_REXMT)) 2535 tp->t_dupacks = 0; 2536 else if (++tp->t_dupacks > tcprexmtthresh || 2537 IN_FASTRECOVERY(tp->t_flags)) { 2538 cc_ack_received(tp, th, nsegs, 2539 CC_DUPACK); 2540 if ((tp->t_flags & TF_SACK_PERMIT) && 2541 IN_FASTRECOVERY(tp->t_flags)) { 2542 int awnd; 2543 2544 /* 2545 * Compute the amount of data in flight first. 2546 * We can inject new data into the pipe iff 2547 * we have less than 1/2 the original window's 2548 * worth of data in flight. 
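 *
 * With V_tcp_do_rfc6675_pipe set, the estimate comes from
 * tcp_compute_pipe(), i.e. roughly
 *	snd_max - snd_una + sack_bytes_rexmit - sacked_bytes;
 * otherwise the older (snd_nxt - snd_fack) + sack_bytes_rexmit
 * form is used.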
2549 */ 2550 if (V_tcp_do_rfc6675_pipe) 2551 awnd = tcp_compute_pipe(tp); 2552 else 2553 awnd = (tp->snd_nxt - tp->snd_fack) + 2554 tp->sackhint.sack_bytes_rexmit; 2555 2556 if (awnd < tp->snd_ssthresh) { 2557 tp->snd_cwnd += maxseg; 2558 if (tp->snd_cwnd > tp->snd_ssthresh) 2559 tp->snd_cwnd = tp->snd_ssthresh; 2560 } 2561 } else 2562 tp->snd_cwnd += maxseg; 2563 (void) tp->t_fb->tfb_tcp_output(tp); 2564 goto drop; 2565 } else if (tp->t_dupacks == tcprexmtthresh) { 2566 tcp_seq onxt = tp->snd_nxt; 2567 2568 /* 2569 * If we're doing sack, check to 2570 * see if we're already in sack 2571 * recovery. If we're not doing sack, 2572 * check to see if we're in newreno 2573 * recovery. 2574 */ 2575 if (tp->t_flags & TF_SACK_PERMIT) { 2576 if (IN_FASTRECOVERY(tp->t_flags)) { 2577 tp->t_dupacks = 0; 2578 break; 2579 } 2580 } else { 2581 if (SEQ_LEQ(th->th_ack, 2582 tp->snd_recover)) { 2583 tp->t_dupacks = 0; 2584 break; 2585 } 2586 } 2587 /* Congestion signal before ack. */ 2588 cc_cong_signal(tp, th, CC_NDUPACK); 2589 cc_ack_received(tp, th, nsegs, 2590 CC_DUPACK); 2591 tcp_timer_activate(tp, TT_REXMT, 0); 2592 tp->t_rtttime = 0; 2593 if (tp->t_flags & TF_SACK_PERMIT) { 2594 TCPSTAT_INC( 2595 tcps_sack_recovery_episode); 2596 tp->snd_recover = tp->snd_nxt; 2597 tp->snd_cwnd = maxseg; 2598 (void) tp->t_fb->tfb_tcp_output(tp); 2599 goto drop; 2600 } 2601 tp->snd_nxt = th->th_ack; 2602 tp->snd_cwnd = maxseg; 2603 (void) tp->t_fb->tfb_tcp_output(tp); 2604 KASSERT(tp->snd_limited <= 2, 2605 ("%s: tp->snd_limited too big", 2606 __func__)); 2607 tp->snd_cwnd = tp->snd_ssthresh + 2608 maxseg * 2609 (tp->t_dupacks - tp->snd_limited); 2610 if (SEQ_GT(onxt, tp->snd_nxt)) 2611 tp->snd_nxt = onxt; 2612 goto drop; 2613 } else if (V_tcp_do_rfc3042) { 2614 /* 2615 * Process first and second duplicate 2616 * ACKs. Each indicates a segment 2617 * leaving the network, creating room 2618 * for more. Make sure we can send a 2619 * packet on reception of each duplicate 2620 * ACK by increasing snd_cwnd by one 2621 * segment. Restore the original 2622 * snd_cwnd after packet transmission. 2623 */ 2624 cc_ack_received(tp, th, nsegs, 2625 CC_DUPACK); 2626 uint32_t oldcwnd = tp->snd_cwnd; 2627 tcp_seq oldsndmax = tp->snd_max; 2628 u_int sent; 2629 int avail; 2630 2631 KASSERT(tp->t_dupacks == 1 || 2632 tp->t_dupacks == 2, 2633 ("%s: dupacks not 1 or 2", 2634 __func__)); 2635 if (tp->t_dupacks == 1) 2636 tp->snd_limited = 0; 2637 tp->snd_cwnd = 2638 (tp->snd_nxt - tp->snd_una) + 2639 (tp->t_dupacks - tp->snd_limited) * 2640 maxseg; 2641 /* 2642 * Only call tcp_output when there 2643 * is new data available to be sent. 2644 * Otherwise we would send pure ACKs. 2645 */ 2646 SOCKBUF_LOCK(&so->so_snd); 2647 avail = sbavail(&so->so_snd) - 2648 (tp->snd_nxt - tp->snd_una); 2649 SOCKBUF_UNLOCK(&so->so_snd); 2650 if (avail > 0) 2651 (void) tp->t_fb->tfb_tcp_output(tp); 2652 sent = tp->snd_max - oldsndmax; 2653 if (sent > maxseg) { 2654 KASSERT((tp->t_dupacks == 2 && 2655 tp->snd_limited == 0) || 2656 (sent == maxseg + 1 && 2657 tp->t_flags & TF_SENTFIN), 2658 ("%s: sent too much", 2659 __func__)); 2660 tp->snd_limited = 2; 2661 } else if (sent > 0) 2662 ++tp->snd_limited; 2663 tp->snd_cwnd = oldcwnd; 2664 goto drop; 2665 } 2666 } 2667 break; 2668 } else { 2669 /* 2670 * This ack is advancing the left edge, reset the 2671 * counter. 2672 */ 2673 tp->t_dupacks = 0; 2674 /* 2675 * If this ack also has new SACK info, increment the 2676 * counter as per rfc6675. 
The variable 2677 * sack_changed tracks all changes to the SACK 2678 * scoreboard, including when partial ACKs without 2679 * SACK options are received, and clear the scoreboard 2680 * from the left side. Such partial ACKs should not be 2681 * counted as dupacks here. 2682 */ 2683 if ((tp->t_flags & TF_SACK_PERMIT) && 2684 (to.to_flags & TOF_SACK) && 2685 sack_changed) 2686 tp->t_dupacks++; 2687 } 2688 2689 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2690 ("%s: th_ack <= snd_una", __func__)); 2691 2692 /* 2693 * If the congestion window was inflated to account 2694 * for the other side's cached packets, retract it. 2695 */ 2696 if (IN_FASTRECOVERY(tp->t_flags)) { 2697 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2698 if (tp->t_flags & TF_SACK_PERMIT) 2699 tcp_sack_partialack(tp, th); 2700 else 2701 tcp_newreno_partial_ack(tp, th); 2702 } else 2703 cc_post_recovery(tp, th); 2704 } 2705 /* 2706 * If we reach this point, ACK is not a duplicate, 2707 * i.e., it ACKs something we sent. 2708 */ 2709 if (tp->t_flags & TF_NEEDSYN) { 2710 /* 2711 * T/TCP: Connection was half-synchronized, and our 2712 * SYN has been ACK'd (so connection is now fully 2713 * synchronized). Go to non-starred state, 2714 * increment snd_una for ACK of SYN, and check if 2715 * we can do window scaling. 2716 */ 2717 tp->t_flags &= ~TF_NEEDSYN; 2718 tp->snd_una++; 2719 /* Do window scaling? */ 2720 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2721 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2722 tp->rcv_scale = tp->request_r_scale; 2723 /* Send window already scaled. */ 2724 } 2725 } 2726 2727 process_ACK: 2728 INP_WLOCK_ASSERT(tp->t_inpcb); 2729 2730 /* 2731 * Adjust for the SYN bit in sequence space, 2732 * but don't account for it in cwnd calculations. 2733 * This is for the SYN_RECEIVED, non-simultaneous 2734 * SYN case. SYN_SENT and simultaneous SYN are 2735 * treated elsewhere. 2736 */ 2737 if (incforsyn) 2738 tp->snd_una++; 2739 acked = BYTES_THIS_ACK(tp, th); 2740 KASSERT(acked >= 0, ("%s: acked unexepectedly negative " 2741 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__, 2742 tp->snd_una, th->th_ack, tp, m)); 2743 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 2744 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2745 2746 /* 2747 * If we just performed our first retransmit, and the ACK 2748 * arrives within our recovery window, then it was a mistake 2749 * to do the retransmit in the first place. Recover our 2750 * original cwnd and ssthresh, and proceed to transmit where 2751 * we left off. 2752 */ 2753 if (tp->t_rxtshift == 1 && 2754 tp->t_flags & TF_PREVVALID && 2755 tp->t_badrxtwin && 2756 SEQ_LT(to.to_tsecr, tp->t_badrxtwin)) 2757 cc_cong_signal(tp, th, CC_RTO_ERR); 2758 2759 /* 2760 * If we have a timestamp reply, update smoothed 2761 * round trip time. If no timestamp is present but 2762 * transmit timer is running and timed sequence 2763 * number was acked, update smoothed round trip time. 2764 * Since we now have an rtt measurement, cancel the 2765 * timer backoff (cf., Phil Karn's retransmit alg.). 2766 * Recompute the initial retransmit timer. 2767 * 2768 * Some boxes send broken timestamp replies 2769 * during the SYN+ACK phase, ignore 2770 * timestamps of 0 or we could calculate a 2771 * huge RTT and blow up the retransmit timer. 
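 *
 * With a usable echo the sample is simply
 *	t = tcp_ts_getticks() - to.to_tsecr,
 * converted to ticks for tcp_xmit_timer(); without timestamps
 * we fall back to the single timed sequence number
 * (t_rtseq/t_rtttime), which only yields a sample for ACKs that
 * unambiguously cover it, in the spirit of Karn's rule.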
2772 */ 2773 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2774 uint32_t t; 2775 2776 t = tcp_ts_getticks() - to.to_tsecr; 2777 if (!tp->t_rttlow || tp->t_rttlow > t) 2778 tp->t_rttlow = t; 2779 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2780 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2781 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2782 tp->t_rttlow = ticks - tp->t_rtttime; 2783 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2784 } 2785 2786 /* 2787 * If all outstanding data is acked, stop retransmit 2788 * timer and remember to restart (more output or persist). 2789 * If there is more data to be acked, restart retransmit 2790 * timer, using current (possibly backed-off) value. 2791 */ 2792 if (th->th_ack == tp->snd_max) { 2793 tcp_timer_activate(tp, TT_REXMT, 0); 2794 needoutput = 1; 2795 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2796 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2797 2798 /* 2799 * If no data (only SYN) was ACK'd, 2800 * skip rest of ACK processing. 2801 */ 2802 if (acked == 0) 2803 goto step6; 2804 2805 /* 2806 * Let the congestion control algorithm update congestion 2807 * control related information. This typically means increasing 2808 * the congestion window. 2809 */ 2810 cc_ack_received(tp, th, nsegs, CC_ACK); 2811 2812 SOCKBUF_LOCK(&so->so_snd); 2813 if (acked > sbavail(&so->so_snd)) { 2814 if (tp->snd_wnd >= sbavail(&so->so_snd)) 2815 tp->snd_wnd -= sbavail(&so->so_snd); 2816 else 2817 tp->snd_wnd = 0; 2818 mfree = sbcut_locked(&so->so_snd, 2819 (int)sbavail(&so->so_snd)); 2820 ourfinisacked = 1; 2821 } else { 2822 mfree = sbcut_locked(&so->so_snd, acked); 2823 if (tp->snd_wnd >= (uint32_t) acked) 2824 tp->snd_wnd -= acked; 2825 else 2826 tp->snd_wnd = 0; 2827 ourfinisacked = 0; 2828 } 2829 /* NB: sowwakeup_locked() does an implicit unlock. */ 2830 sowwakeup_locked(so); 2831 m_freem(mfree); 2832 /* Detect una wraparound. */ 2833 if (!IN_RECOVERY(tp->t_flags) && 2834 SEQ_GT(tp->snd_una, tp->snd_recover) && 2835 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2836 tp->snd_recover = th->th_ack - 1; 2837 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2838 if (IN_RECOVERY(tp->t_flags) && 2839 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2840 EXIT_RECOVERY(tp->t_flags); 2841 } 2842 tp->snd_una = th->th_ack; 2843 if (tp->t_flags & TF_SACK_PERMIT) { 2844 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2845 tp->snd_recover = tp->snd_una; 2846 } 2847 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2848 tp->snd_nxt = tp->snd_una; 2849 2850 switch (tp->t_state) { 2851 2852 /* 2853 * In FIN_WAIT_1 STATE in addition to the processing 2854 * for the ESTABLISHED state if our FIN is now acknowledged 2855 * then enter FIN_WAIT_2. 2856 */ 2857 case TCPS_FIN_WAIT_1: 2858 if (ourfinisacked) { 2859 /* 2860 * If we can't receive any more 2861 * data, then closing user can proceed. 2862 * Starting the timer is contrary to the 2863 * specification, but if we don't get a FIN 2864 * we'll hang forever. 2865 * 2866 * XXXjl: 2867 * we should release the tp also, and use a 2868 * compressed state. 2869 */ 2870 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2871 soisdisconnected(so); 2872 tcp_timer_activate(tp, TT_2MSL, 2873 (tcp_fast_finwait2_recycle ? 
2874 tcp_finwait2_timeout : 2875 TP_MAXIDLE(tp))); 2876 } 2877 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2878 } 2879 break; 2880 2881 /* 2882 * In CLOSING STATE in addition to the processing for 2883 * the ESTABLISHED state if the ACK acknowledges our FIN 2884 * then enter the TIME-WAIT state, otherwise ignore 2885 * the segment. 2886 */ 2887 case TCPS_CLOSING: 2888 if (ourfinisacked) { 2889 tcp_twstart(tp); 2890 m_freem(m); 2891 return; 2892 } 2893 break; 2894 2895 /* 2896 * In LAST_ACK, we may still be waiting for data to drain 2897 * and/or to be acked, as well as for the ack of our FIN. 2898 * If our FIN is now acknowledged, delete the TCB, 2899 * enter the closed state and return. 2900 */ 2901 case TCPS_LAST_ACK: 2902 if (ourfinisacked) { 2903 tp = tcp_close(tp); 2904 goto drop; 2905 } 2906 break; 2907 } 2908 } 2909 2910 step6: 2911 INP_WLOCK_ASSERT(tp->t_inpcb); 2912 2913 /* 2914 * Update window information. 2915 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2916 */ 2917 if ((thflags & TH_ACK) && 2918 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2919 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2920 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2921 /* keep track of pure window updates */ 2922 if (tlen == 0 && 2923 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2924 TCPSTAT_INC(tcps_rcvwinupd); 2925 tp->snd_wnd = tiwin; 2926 tp->snd_wl1 = th->th_seq; 2927 tp->snd_wl2 = th->th_ack; 2928 if (tp->snd_wnd > tp->max_sndwnd) 2929 tp->max_sndwnd = tp->snd_wnd; 2930 needoutput = 1; 2931 } 2932 2933 /* 2934 * Process segments with URG. 2935 */ 2936 if ((thflags & TH_URG) && th->th_urp && 2937 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2938 /* 2939 * This is a kludge, but if we receive and accept 2940 * random urgent pointers, we'll crash in 2941 * soreceive. It's hard to imagine someone 2942 * actually wanting to send this much urgent data. 2943 */ 2944 SOCKBUF_LOCK(&so->so_rcv); 2945 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 2946 th->th_urp = 0; /* XXX */ 2947 thflags &= ~TH_URG; /* XXX */ 2948 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2949 goto dodata; /* XXX */ 2950 } 2951 /* 2952 * If this segment advances the known urgent pointer, 2953 * then mark the data stream. This should not happen 2954 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2955 * a FIN has been received from the remote side. 2956 * In these states we ignore the URG. 2957 * 2958 * According to RFC961 (Assigned Protocols), 2959 * the urgent pointer points to the last octet 2960 * of urgent data. We continue, however, 2961 * to consider it to indicate the first octet 2962 * of data past the urgent section as the original 2963 * spec states (in one of two places). 2964 */ 2965 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2966 tp->rcv_up = th->th_seq + th->th_urp; 2967 so->so_oobmark = sbavail(&so->so_rcv) + 2968 (tp->rcv_up - tp->rcv_nxt) - 1; 2969 if (so->so_oobmark == 0) 2970 so->so_rcv.sb_state |= SBS_RCVATMARK; 2971 sohasoutofband(so); 2972 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2973 } 2974 SOCKBUF_UNLOCK(&so->so_rcv); 2975 /* 2976 * Remove out of band data so doesn't get presented to user. 2977 * This can happen independent of advancing the URG pointer, 2978 * but if two URG's are pending at once, some out-of-band 2979 * data may creep in... ick. 
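 *
 * (tcp_pulloutofband(), defined further down in this file,
 * stashes the single urgent byte in tp->t_iobc and trims it out
 * of the mbuf chain so it never reaches the in-band data
 * stream.)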
2980 */ 2981 if (th->th_urp <= (uint32_t)tlen && 2982 !(so->so_options & SO_OOBINLINE)) { 2983 /* hdr drop is delayed */ 2984 tcp_pulloutofband(so, th, m, drop_hdrlen); 2985 } 2986 } else { 2987 /* 2988 * If no out of band data is expected, 2989 * pull receive urgent pointer along 2990 * with the receive window. 2991 */ 2992 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2993 tp->rcv_up = tp->rcv_nxt; 2994 } 2995 dodata: /* XXX */ 2996 INP_WLOCK_ASSERT(tp->t_inpcb); 2997 2998 /* 2999 * Process the segment text, merging it into the TCP sequencing queue, 3000 * and arranging for acknowledgment of receipt if necessary. 3001 * This process logically involves adjusting tp->rcv_wnd as data 3002 * is presented to the user (this happens in tcp_usrreq.c, 3003 * case PRU_RCVD). If a FIN has already been received on this 3004 * connection then we just ignore the text. 3005 */ 3006 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 3007 IS_FASTOPEN(tp->t_flags)); 3008 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 3009 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3010 tcp_seq save_start = th->th_seq; 3011 tcp_seq save_rnxt = tp->rcv_nxt; 3012 int save_tlen = tlen; 3013 m_adj(m, drop_hdrlen); /* delayed header drop */ 3014 /* 3015 * Insert segment which includes th into TCP reassembly queue 3016 * with control block tp. Set thflags to whether reassembly now 3017 * includes a segment with FIN. This handles the common case 3018 * inline (segment is the next to be received on an established 3019 * connection, and the queue is empty), avoiding linkage into 3020 * and removal from the queue and repetition of various 3021 * conversions. 3022 * Set DELACK for segments received in order, but ack 3023 * immediately when segments are out of order (so 3024 * fast retransmit can work). 3025 */ 3026 if (th->th_seq == tp->rcv_nxt && 3027 SEGQ_EMPTY(tp) && 3028 (TCPS_HAVEESTABLISHED(tp->t_state) || 3029 tfo_syn)) { 3030 if (DELAY_ACK(tp, tlen) || tfo_syn) 3031 tp->t_flags |= TF_DELACK; 3032 else 3033 tp->t_flags |= TF_ACKNOW; 3034 tp->rcv_nxt += tlen; 3035 if (tlen && 3036 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 3037 (tp->t_fbyte_in == 0)) { 3038 tp->t_fbyte_in = ticks; 3039 if (tp->t_fbyte_in == 0) 3040 tp->t_fbyte_in = 1; 3041 if (tp->t_fbyte_out && tp->t_fbyte_in) 3042 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 3043 } 3044 thflags = th->th_flags & TH_FIN; 3045 TCPSTAT_INC(tcps_rcvpack); 3046 TCPSTAT_ADD(tcps_rcvbyte, tlen); 3047 SOCKBUF_LOCK(&so->so_rcv); 3048 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 3049 m_freem(m); 3050 else 3051 sbappendstream_locked(&so->so_rcv, m, 0); 3052 /* NB: sorwakeup_locked() does an implicit unlock. */ 3053 sorwakeup_locked(so); 3054 } else { 3055 /* 3056 * XXX: Due to the header drop above "th" is 3057 * theoretically invalid by now. Fortunately 3058 * m_adj() doesn't actually frees any mbufs 3059 * when trimming from the head. 3060 */ 3061 tcp_seq temp = save_start; 3062 thflags = tcp_reass(tp, th, &temp, &tlen, m); 3063 tp->t_flags |= TF_ACKNOW; 3064 } 3065 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) { 3066 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 3067 /* 3068 * DSACK actually handled in the fastpath 3069 * above. 3070 */ 3071 tcp_update_sack_list(tp, save_start, 3072 save_start + save_tlen); 3073 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 3074 if ((tp->rcv_numsacks >= 1) && 3075 (tp->sackblks[0].end == save_start)) { 3076 /* 3077 * Partial overlap, recorded at todrop 3078 * above. 
3079 */ 3080 tcp_update_sack_list(tp, 3081 tp->sackblks[0].start, 3082 tp->sackblks[0].end); 3083 } else { 3084 tcp_update_dsack_list(tp, save_start, 3085 save_start + save_tlen); 3086 } 3087 } else if (tlen >= save_tlen) { 3088 /* Update of sackblks. */ 3089 tcp_update_dsack_list(tp, save_start, 3090 save_start + save_tlen); 3091 } else if (tlen > 0) { 3092 tcp_update_dsack_list(tp, save_start, 3093 save_start + tlen); 3094 } 3095 } 3096 #if 0 3097 /* 3098 * Note the amount of data that peer has sent into 3099 * our window, in order to estimate the sender's 3100 * buffer size. 3101 * XXX: Unused. 3102 */ 3103 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3104 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3105 else 3106 len = so->so_rcv.sb_hiwat; 3107 #endif 3108 } else { 3109 m_freem(m); 3110 thflags &= ~TH_FIN; 3111 } 3112 3113 /* 3114 * If FIN is received ACK the FIN and let the user know 3115 * that the connection is closing. 3116 */ 3117 if (thflags & TH_FIN) { 3118 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3119 socantrcvmore(so); 3120 /* 3121 * If connection is half-synchronized 3122 * (ie NEEDSYN flag on) then delay ACK, 3123 * so it may be piggybacked when SYN is sent. 3124 * Otherwise, since we received a FIN then no 3125 * more input can be expected, send ACK now. 3126 */ 3127 if (tp->t_flags & TF_NEEDSYN) 3128 tp->t_flags |= TF_DELACK; 3129 else 3130 tp->t_flags |= TF_ACKNOW; 3131 tp->rcv_nxt++; 3132 } 3133 switch (tp->t_state) { 3134 3135 /* 3136 * In SYN_RECEIVED and ESTABLISHED STATES 3137 * enter the CLOSE_WAIT state. 3138 */ 3139 case TCPS_SYN_RECEIVED: 3140 tp->t_starttime = ticks; 3141 /* FALLTHROUGH */ 3142 case TCPS_ESTABLISHED: 3143 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3144 break; 3145 3146 /* 3147 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3148 * enter the CLOSING state. 3149 */ 3150 case TCPS_FIN_WAIT_1: 3151 tcp_state_change(tp, TCPS_CLOSING); 3152 break; 3153 3154 /* 3155 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3156 * starting the time-wait timer, turning off the other 3157 * standard timers. 3158 */ 3159 case TCPS_FIN_WAIT_2: 3160 tcp_twstart(tp); 3161 return; 3162 } 3163 } 3164 #ifdef TCPDEBUG 3165 if (so->so_options & SO_DEBUG) 3166 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3167 &tcp_savetcp, 0); 3168 #endif 3169 TCP_PROBE3(debug__input, tp, th, m); 3170 3171 /* 3172 * Return any desired output. 3173 */ 3174 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3175 (void) tp->t_fb->tfb_tcp_output(tp); 3176 3177 check_delack: 3178 INP_WLOCK_ASSERT(tp->t_inpcb); 3179 3180 if (tp->t_flags & TF_DELACK) { 3181 tp->t_flags &= ~TF_DELACK; 3182 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3183 } 3184 INP_WUNLOCK(tp->t_inpcb); 3185 return; 3186 3187 dropafterack: 3188 /* 3189 * Generate an ACK dropping incoming segment if it occupies 3190 * sequence space, where the ACK reflects our state. 3191 * 3192 * We can now skip the test for the RST flag since all 3193 * paths to this code happen after packets containing 3194 * RST have been dropped. 3195 * 3196 * In the SYN-RECEIVED state, don't send an ACK unless the 3197 * segment we received passes the SYN-RECEIVED ACK test. 3198 * If it fails send a RST. This breaks the loop in the 3199 * "LAND" DoS attack, and also prevents an ACK storm 3200 * between two listening ports that have been sent forged 3201 * SYN segments, each with the source address of the other. 
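 *
 * Concretely, the test below only ACKs a SYN-RECEIVED segment
 * whose acknowledgment field satisfies
 *	snd_una <= SEG.ACK <= snd_max;
 * anything else is answered with a RST.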
3202 */ 3203 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3204 (SEQ_GT(tp->snd_una, th->th_ack) || 3205 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3206 rstreason = BANDLIM_RST_OPENPORT; 3207 goto dropwithreset; 3208 } 3209 #ifdef TCPDEBUG 3210 if (so->so_options & SO_DEBUG) 3211 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3212 &tcp_savetcp, 0); 3213 #endif 3214 TCP_PROBE3(debug__input, tp, th, m); 3215 tp->t_flags |= TF_ACKNOW; 3216 (void) tp->t_fb->tfb_tcp_output(tp); 3217 INP_WUNLOCK(tp->t_inpcb); 3218 m_freem(m); 3219 return; 3220 3221 dropwithreset: 3222 if (tp != NULL) { 3223 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3224 INP_WUNLOCK(tp->t_inpcb); 3225 } else 3226 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3227 return; 3228 3229 drop: 3230 /* 3231 * Drop space held by incoming segment and return. 3232 */ 3233 #ifdef TCPDEBUG 3234 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3235 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3236 &tcp_savetcp, 0); 3237 #endif 3238 TCP_PROBE3(debug__input, tp, th, m); 3239 if (tp != NULL) 3240 INP_WUNLOCK(tp->t_inpcb); 3241 m_freem(m); 3242 } 3243 3244 /* 3245 * Issue RST and make ACK acceptable to originator of segment. 3246 * The mbuf must still include the original packet header. 3247 * tp may be NULL. 3248 */ 3249 void 3250 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3251 int tlen, int rstreason) 3252 { 3253 #ifdef INET 3254 struct ip *ip; 3255 #endif 3256 #ifdef INET6 3257 struct ip6_hdr *ip6; 3258 #endif 3259 3260 if (tp != NULL) { 3261 INP_WLOCK_ASSERT(tp->t_inpcb); 3262 } 3263 3264 /* Don't bother if destination was broadcast/multicast. */ 3265 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3266 goto drop; 3267 #ifdef INET6 3268 if (mtod(m, struct ip *)->ip_v == 6) { 3269 ip6 = mtod(m, struct ip6_hdr *); 3270 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3271 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3272 goto drop; 3273 /* IPv6 anycast check is done at tcp6_input() */ 3274 } 3275 #endif 3276 #if defined(INET) && defined(INET6) 3277 else 3278 #endif 3279 #ifdef INET 3280 { 3281 ip = mtod(m, struct ip *); 3282 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3283 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3284 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3285 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3286 goto drop; 3287 } 3288 #endif 3289 3290 /* Perform bandwidth limiting. */ 3291 if (badport_bandlim(rstreason) < 0) 3292 goto drop; 3293 3294 /* tcp_respond consumes the mbuf chain. */ 3295 if (th->th_flags & TH_ACK) { 3296 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3297 th->th_ack, TH_RST); 3298 } else { 3299 if (th->th_flags & TH_SYN) 3300 tlen++; 3301 if (th->th_flags & TH_FIN) 3302 tlen++; 3303 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3304 (tcp_seq)0, TH_RST|TH_ACK); 3305 } 3306 return; 3307 drop: 3308 m_freem(m); 3309 } 3310 3311 /* 3312 * Parse TCP options and place in tcpopt. 
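 *
 * Options arrive as (kind, length, value) triples, except for
 * the single-octet EOL and NOP kinds.  For example, an MSS
 * option on the wire is kind 2, length 4, followed by a 16-bit
 * MSS in network byte order, which the loop below copies out
 * and converts with ntohs() into to->to_mss.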
3313 */ 3314 void 3315 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3316 { 3317 int opt, optlen; 3318 3319 to->to_flags = 0; 3320 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3321 opt = cp[0]; 3322 if (opt == TCPOPT_EOL) 3323 break; 3324 if (opt == TCPOPT_NOP) 3325 optlen = 1; 3326 else { 3327 if (cnt < 2) 3328 break; 3329 optlen = cp[1]; 3330 if (optlen < 2 || optlen > cnt) 3331 break; 3332 } 3333 switch (opt) { 3334 case TCPOPT_MAXSEG: 3335 if (optlen != TCPOLEN_MAXSEG) 3336 continue; 3337 if (!(flags & TO_SYN)) 3338 continue; 3339 to->to_flags |= TOF_MSS; 3340 bcopy((char *)cp + 2, 3341 (char *)&to->to_mss, sizeof(to->to_mss)); 3342 to->to_mss = ntohs(to->to_mss); 3343 break; 3344 case TCPOPT_WINDOW: 3345 if (optlen != TCPOLEN_WINDOW) 3346 continue; 3347 if (!(flags & TO_SYN)) 3348 continue; 3349 to->to_flags |= TOF_SCALE; 3350 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3351 break; 3352 case TCPOPT_TIMESTAMP: 3353 if (optlen != TCPOLEN_TIMESTAMP) 3354 continue; 3355 to->to_flags |= TOF_TS; 3356 bcopy((char *)cp + 2, 3357 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3358 to->to_tsval = ntohl(to->to_tsval); 3359 bcopy((char *)cp + 6, 3360 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3361 to->to_tsecr = ntohl(to->to_tsecr); 3362 break; 3363 case TCPOPT_SIGNATURE: 3364 /* 3365 * In order to reply to a host which has set the 3366 * TCP_SIGNATURE option in its initial SYN, we have 3367 * to record the fact that the option was observed 3368 * here for the syncache code to perform the correct 3369 * response. 3370 */ 3371 if (optlen != TCPOLEN_SIGNATURE) 3372 continue; 3373 to->to_flags |= TOF_SIGNATURE; 3374 to->to_signature = cp + 2; 3375 break; 3376 case TCPOPT_SACK_PERMITTED: 3377 if (optlen != TCPOLEN_SACK_PERMITTED) 3378 continue; 3379 if (!(flags & TO_SYN)) 3380 continue; 3381 if (!V_tcp_do_sack) 3382 continue; 3383 to->to_flags |= TOF_SACKPERM; 3384 break; 3385 case TCPOPT_SACK: 3386 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3387 continue; 3388 if (flags & TO_SYN) 3389 continue; 3390 to->to_flags |= TOF_SACK; 3391 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3392 to->to_sacks = cp + 2; 3393 TCPSTAT_INC(tcps_sack_rcv_blocks); 3394 break; 3395 case TCPOPT_FAST_OPEN: 3396 /* 3397 * Cookie length validation is performed by the 3398 * server side cookie checking code or the client 3399 * side cookie cache update code. 3400 */ 3401 if (!(flags & TO_SYN)) 3402 continue; 3403 if (!V_tcp_fastopen_client_enable && 3404 !V_tcp_fastopen_server_enable) 3405 continue; 3406 to->to_flags |= TOF_FASTOPEN; 3407 to->to_tfo_len = optlen - 2; 3408 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3409 break; 3410 default: 3411 continue; 3412 } 3413 } 3414 } 3415 3416 /* 3417 * Pull out of band byte out of a segment so 3418 * it doesn't appear in the user's data queue. 3419 * It is still reflected in the segment length for 3420 * sequencing purposes. 
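 *
 * Sketch of the effect: the byte at offset (off + th->th_urp - 1)
 * in the mbuf chain is copied into tp->t_iobc, the chain is
 * shortened by one byte, and TCPOOB_HAVEDATA is set so the
 * socket layer can later hand the byte out via MSG_OOB.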
3421 */ 3422 void 3423 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3424 int off) 3425 { 3426 int cnt = off + th->th_urp - 1; 3427 3428 while (cnt >= 0) { 3429 if (m->m_len > cnt) { 3430 char *cp = mtod(m, caddr_t) + cnt; 3431 struct tcpcb *tp = sototcpcb(so); 3432 3433 INP_WLOCK_ASSERT(tp->t_inpcb); 3434 3435 tp->t_iobc = *cp; 3436 tp->t_oobflags |= TCPOOB_HAVEDATA; 3437 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3438 m->m_len--; 3439 if (m->m_flags & M_PKTHDR) 3440 m->m_pkthdr.len--; 3441 return; 3442 } 3443 cnt -= m->m_len; 3444 m = m->m_next; 3445 if (m == NULL) 3446 break; 3447 } 3448 panic("tcp_pulloutofband"); 3449 } 3450 3451 /* 3452 * Collect new round-trip time estimate 3453 * and update averages and current timeout. 3454 */ 3455 void 3456 tcp_xmit_timer(struct tcpcb *tp, int rtt) 3457 { 3458 int delta; 3459 3460 INP_WLOCK_ASSERT(tp->t_inpcb); 3461 3462 TCPSTAT_INC(tcps_rttupdated); 3463 tp->t_rttupdated++; 3464 #ifdef STATS 3465 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, 3466 imax(0, rtt * 1000 / hz)); 3467 #endif 3468 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) { 3469 /* 3470 * srtt is stored as fixed point with 5 bits after the 3471 * binary point (i.e., scaled by 8). The following magic 3472 * is equivalent to the smoothing algorithm in rfc793 with 3473 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3474 * point). Adjust rtt to origin 0. 3475 */ 3476 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3477 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3478 3479 if ((tp->t_srtt += delta) <= 0) 3480 tp->t_srtt = 1; 3481 3482 /* 3483 * We accumulate a smoothed rtt variance (actually, a 3484 * smoothed mean difference), then set the retransmit 3485 * timer to smoothed rtt + 4 times the smoothed variance. 3486 * rttvar is stored as fixed point with 4 bits after the 3487 * binary point (scaled by 16). The following is 3488 * equivalent to rfc793 smoothing with an alpha of .75 3489 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3490 * rfc793's wired-in beta. 3491 */ 3492 if (delta < 0) 3493 delta = -delta; 3494 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3495 if ((tp->t_rttvar += delta) <= 0) 3496 tp->t_rttvar = 1; 3497 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3498 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3499 } else { 3500 /* 3501 * No rtt measurement yet - use the unsmoothed rtt. 3502 * Set the variance to half the rtt (so our first 3503 * retransmit happens at 3*rtt). 3504 */ 3505 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3506 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3507 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3508 } 3509 tp->t_rtttime = 0; 3510 tp->t_rxtshift = 0; 3511 3512 /* 3513 * the retransmit should happen at rtt + 4 * rttvar. 3514 * Because of the way we do the smoothing, srtt and rttvar 3515 * will each average +1/2 tick of bias. When we compute 3516 * the retransmit timer, we want 1/2 tick of rounding and 3517 * 1 extra tick because of +-1/2 tick uncertainty in the 3518 * firing of the timer. The bias will give us exactly the 3519 * 1.5 tick we need. But, because the bias is 3520 * statistical, we have to test that we don't drop below 3521 * the minimum feasible timer (which is 2 ticks). 3522 */ 3523 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3524 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3525 3526 /* 3527 * We received an ack for a packet that wasn't retransmitted; 3528 * it is probably safe to discard any error indications we've 3529 * received recently. 
This isn't quite right, but close enough 3530 * for now (a route might have failed after we sent a segment, 3531 * and the return path might not be symmetrical). 3532 */ 3533 tp->t_softerror = 0; 3534 } 3535 3536 /* 3537 * Determine a reasonable value for maxseg size. 3538 * If the route is known, check route for mtu. 3539 * If none, use an mss that can be handled on the outgoing interface 3540 * without forcing IP to fragment. If no route is found, route has no mtu, 3541 * or the destination isn't local, use a default, hopefully conservative 3542 * size (usually 512 or the default IP max size, but no more than the mtu 3543 * of the interface), as we can't discover anything about intervening 3544 * gateways or networks. We also initialize the congestion/slow start 3545 * window to be a single segment if the destination isn't local. 3546 * While looking at the routing entry, we also initialize other path-dependent 3547 * parameters from pre-set or cached values in the routing entry. 3548 * 3549 * NOTE that resulting t_maxseg doesn't include space for TCP options or 3550 * IP options, e.g. IPSEC data, since length of this data may vary, and 3551 * thus it is calculated for every segment separately in tcp_output(). 3552 * 3553 * NOTE that this routine is only called when we process an incoming 3554 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3555 * settings are handled in tcp_mssopt(). 3556 */ 3557 void 3558 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3559 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3560 { 3561 int mss = 0; 3562 uint32_t maxmtu = 0; 3563 struct inpcb *inp = tp->t_inpcb; 3564 struct hc_metrics_lite metrics; 3565 #ifdef INET6 3566 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3567 size_t min_protoh = isipv6 ? 3568 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3569 sizeof (struct tcpiphdr); 3570 #else 3571 const size_t min_protoh = sizeof(struct tcpiphdr); 3572 #endif 3573 3574 INP_WLOCK_ASSERT(tp->t_inpcb); 3575 3576 if (mtuoffer != -1) { 3577 KASSERT(offer == -1, ("%s: conflict", __func__)); 3578 offer = mtuoffer - min_protoh; 3579 } 3580 3581 /* Initialize. */ 3582 #ifdef INET6 3583 if (isipv6) { 3584 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3585 tp->t_maxseg = V_tcp_v6mssdflt; 3586 } 3587 #endif 3588 #if defined(INET) && defined(INET6) 3589 else 3590 #endif 3591 #ifdef INET 3592 { 3593 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3594 tp->t_maxseg = V_tcp_mssdflt; 3595 } 3596 #endif 3597 3598 /* 3599 * No route to sender, stay with default mss and return. 3600 */ 3601 if (maxmtu == 0) { 3602 /* 3603 * In case we return early we need to initialize metrics 3604 * to a defined state as tcp_hc_get() would do for us 3605 * if there was no cache hit. 3606 */ 3607 if (metricptr != NULL) 3608 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3609 return; 3610 } 3611 3612 /* What have we got? */ 3613 switch (offer) { 3614 case 0: 3615 /* 3616 * Offer == 0 means that there was no MSS on the SYN 3617 * segment, in this case we use tcp_mssdflt as 3618 * already assigned to t_maxseg above. 3619 */ 3620 offer = tp->t_maxseg; 3621 break; 3622 3623 case -1: 3624 /* 3625 * Offer == -1 means that we didn't receive SYN yet. 3626 */ 3627 /* FALLTHROUGH */ 3628 3629 default: 3630 /* 3631 * Prevent DoS attack with too small MSS. Round up 3632 * to at least minmss. 3633 */ 3634 offer = max(offer, V_tcp_minmss); 3635 } 3636 3637 /* 3638 * rmx information is now retrieved from tcp_hostcache. 
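 *
 * (tcp_hc_get() fills the hc_metrics_lite structure with any
 * cached per-destination attributes, e.g. rmx_mtu and the
 * send/receive pipe sizes consulted below; on a cache miss the
 * structure comes back zeroed, which callers treat as "no
 * information".)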
	/*
	 * rmx information is now retrieved from tcp_hostcache.
	 */
	tcp_hc_get(&inp->inp_inc, &metrics);
	if (metricptr != NULL)
		bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));

	/*
	 * If there's a discovered mtu in tcp hostcache, use it.
	 * Else, use the link mtu.
	 */
	if (metrics.rmx_mtu)
		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
	else {
#ifdef INET6
		if (isipv6) {
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in6_localaddr(&inp->in6p_faddr))
				mss = min(mss, V_tcp_v6mssdflt);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			mss = maxmtu - min_protoh;
			if (!V_path_mtu_discovery &&
			    !in_localaddr(inp->inp_faddr))
				mss = min(mss, V_tcp_mssdflt);
		}
#endif
		/*
		 * XXX - The above conditional (mss = maxmtu - min_protoh)
		 * probably violates the TCP spec.
		 * The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
	}
	mss = min(mss, offer);

	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	mss = max(mss, 64);

	tp->t_maxseg = mss;
}
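/*
 * Illustrative example (not part of the original sources): on a plain
 * 1500-byte MTU Ethernet path with no hostcache entry, tcp_mss_update()
 * computes mss = 1500 - 40 = 1460 for IPv4 (20-byte IP plus 20-byte TCP
 * header) or 1500 - 60 = 1440 for IPv6, which is then limited by the
 * peer's offer and floored at 64 bytes.
 */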
void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	uint32_t bufsize;
	struct inpcb *inp;
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;
	inp = tp->t_inpcb;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat is different from the default (then
	 * it has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	uint32_t thcmtu = 0;
	uint32_t maxmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	uint32_t ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on the fact that tp->snd_una
	 * is not updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}

int
tcp_compute_pipe(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una +
		tp->sackhint.sack_bytes_rexmit -
		tp->sackhint.sacked_bytes);
}
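/*
 * Illustrative example (not part of the original sources): if
 * snd_max - snd_una covers ten segments' worth of data, three of those
 * segments have been reported SACKed and one has already been
 * retransmitted, tcp_compute_pipe() above estimates (10 - 3 + 1) = 8
 * segments' worth of data still in flight.
 */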
uint32_t
tcp_compute_initwnd(uint32_t maxseg)
{
	/*
	 * Calculate the Initial Window, also used as Restart Window.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size is also
	 * supported.
	 */
	if (V_tcp_initcwnd_segments)
		return min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		return min(4 * maxseg, max(2 * maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			return (2 * maxseg);
		else if (maxseg > 1095)
			return (3 * maxseg);
		else
			return (4 * maxseg);
	}
}
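/*
 * Illustrative example (not part of the original sources): with
 * maxseg = 1460, setting V_tcp_initcwnd_segments to 10 yields
 * min(10 * 1460, max(2 * 1460, 10 * 1460)) = 14600 bytes; the RFC3390
 * branch would yield min(5840, max(2920, 4380)) = 4380 bytes; and the
 * RFC5681 fallback, since 1095 < 1460 <= 2190, yields 3 * 1460 = 4380
 * bytes.
 */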