/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
48 * 49 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 50 */ 51 52 #include <sys/cdefs.h> 53 __FBSDID("$FreeBSD$"); 54 55 #include "opt_inet.h" 56 #include "opt_inet6.h" 57 #include "opt_ipsec.h" 58 #include "opt_tcpdebug.h" 59 60 #include <sys/param.h> 61 #include <sys/arb.h> 62 #include <sys/kernel.h> 63 #ifdef TCP_HHOOK 64 #include <sys/hhook.h> 65 #endif 66 #include <sys/malloc.h> 67 #include <sys/mbuf.h> 68 #include <sys/proc.h> /* for proc0 declaration */ 69 #include <sys/protosw.h> 70 #include <sys/qmath.h> 71 #include <sys/sdt.h> 72 #include <sys/signalvar.h> 73 #include <sys/socket.h> 74 #include <sys/socketvar.h> 75 #include <sys/sysctl.h> 76 #include <sys/syslog.h> 77 #include <sys/systm.h> 78 #include <sys/stats.h> 79 80 #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 81 82 #include <vm/uma.h> 83 84 #include <net/if.h> 85 #include <net/if_var.h> 86 #include <net/route.h> 87 #include <net/vnet.h> 88 89 #define TCPSTATES /* for logging */ 90 91 #include <netinet/in.h> 92 #include <netinet/in_kdtrace.h> 93 #include <netinet/in_pcb.h> 94 #include <netinet/in_systm.h> 95 #include <netinet/ip.h> 96 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 97 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 98 #include <netinet/ip_var.h> 99 #include <netinet/ip_options.h> 100 #include <netinet/ip6.h> 101 #include <netinet/icmp6.h> 102 #include <netinet6/in6_pcb.h> 103 #include <netinet6/in6_var.h> 104 #include <netinet6/ip6_var.h> 105 #include <netinet6/nd6.h> 106 #include <netinet/tcp.h> 107 #include <netinet/tcp_fsm.h> 108 #include <netinet/tcp_log_buf.h> 109 #include <netinet/tcp_seq.h> 110 #include <netinet/tcp_timer.h> 111 #include <netinet/tcp_var.h> 112 #include <netinet6/tcp6_var.h> 113 #include <netinet/tcpip.h> 114 #include <netinet/cc/cc.h> 115 #include <netinet/tcp_fastopen.h> 116 #ifdef TCPPCAP 117 #include <netinet/tcp_pcap.h> 118 #endif 119 #include <netinet/tcp_syncache.h> 120 #ifdef TCPDEBUG 121 #include <netinet/tcp_debug.h> 122 #endif /* TCPDEBUG */ 123 #ifdef TCP_OFFLOAD 124 #include <netinet/tcp_offload.h> 125 #endif 126 127 #include <netipsec/ipsec_support.h> 128 129 #include <machine/in_cksum.h> 130 131 #include <security/mac/mac_framework.h> 132 133 const int tcprexmtthresh = 3; 134 135 VNET_DEFINE(int, tcp_log_in_vain) = 0; 136 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW, 137 &VNET_NAME(tcp_log_in_vain), 0, 138 "Log all incoming TCP segments to closed ports"); 139 140 VNET_DEFINE(int, blackhole) = 0; 141 #define V_blackhole VNET(blackhole) 142 SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW, 143 &VNET_NAME(blackhole), 0, 144 "Do not send RST on segments to closed ports"); 145 146 VNET_DEFINE(int, tcp_delack_enabled) = 1; 147 SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW, 148 &VNET_NAME(tcp_delack_enabled), 0, 149 "Delay ACK to try and piggyback it onto a data packet"); 150 151 VNET_DEFINE(int, drop_synfin) = 0; 152 SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW, 153 &VNET_NAME(drop_synfin), 0, 154 "Drop TCP packets with SYN+FIN set"); 155 156 VNET_DEFINE(int, tcp_do_newcwv) = 0; 157 SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW, 158 &VNET_NAME(tcp_do_newcwv), 0, 159 "Enable New Congestion Window Validation per RFC7661"); 160 161 VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0; 162 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW, 163 &VNET_NAME(tcp_do_rfc6675_pipe), 0, 164 "Use 
    calculated pipe/in-flight bytes per RFC 6675");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

/*
 * TCP statistics are stored in an array of counter(9)s, whose size matches
 * that of struct tcpstat.  The TCP running connection count is a regular array.
237 */ 238 VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat); 239 SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat, 240 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)"); 241 VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]); 242 SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD | 243 CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES, 244 "TCP connection counts by TCP state"); 245 246 static void 247 tcp_vnet_init(const void *unused) 248 { 249 250 COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK); 251 VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK); 252 } 253 VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 254 tcp_vnet_init, NULL); 255 256 #ifdef VIMAGE 257 static void 258 tcp_vnet_uninit(const void *unused) 259 { 260 261 COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES); 262 VNET_PCPUSTAT_FREE(tcpstat); 263 } 264 VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 265 tcp_vnet_uninit, NULL); 266 #endif /* VIMAGE */ 267 268 /* 269 * Kernel module interface for updating tcpstat. The first argument is an index 270 * into tcpstat treated as an array. 271 */ 272 void 273 kmod_tcpstat_add(int statnum, int val) 274 { 275 276 counter_u64_add(VNET(tcpstat)[statnum], val); 277 } 278 279 #ifdef TCP_HHOOK 280 /* 281 * Wrapper for the TCP established input helper hook. 282 */ 283 void 284 hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to) 285 { 286 struct tcp_hhook_data hhook_data; 287 288 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) { 289 hhook_data.tp = tp; 290 hhook_data.th = th; 291 hhook_data.to = to; 292 293 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data, 294 tp->osd); 295 } 296 } 297 #endif 298 299 /* 300 * CC wrapper hook functions 301 */ 302 void 303 cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs, 304 uint16_t type) 305 { 306 #ifdef STATS 307 int32_t gput; 308 #endif 309 310 INP_WLOCK_ASSERT(tp->t_inpcb); 311 312 tp->ccv->nsegs = nsegs; 313 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); 314 if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) || 315 (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) && 316 (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2)))) 317 tp->ccv->flags |= CCF_CWND_LIMITED; 318 else 319 tp->ccv->flags &= ~CCF_CWND_LIMITED; 320 321 if (type == CC_ACK) { 322 #ifdef STATS 323 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 324 ((int32_t)tp->snd_cwnd) - tp->snd_wnd); 325 if (!IN_RECOVERY(tp->t_flags)) 326 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN, 327 tp->ccv->bytes_this_ack / (tcp_maxseg(tp) * nsegs)); 328 if ((tp->t_flags & TF_GPUTINPROG) && 329 SEQ_GEQ(th->th_ack, tp->gput_ack)) { 330 /* 331 * Compute goodput in bits per millisecond. 332 */ 333 gput = (((int64_t)(th->th_ack - tp->gput_seq)) << 3) / 334 max(1, tcp_ts_getticks() - tp->gput_ts); 335 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 336 gput); 337 /* 338 * XXXLAS: This is a temporary hack, and should be 339 * chained off VOI_TCP_GPUT when stats(9) grows an API 340 * to deal with chained VOIs. 
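			 * The update below records the change relative to the
			 * previous goodput sample as a percentage
			 * (VOI_TCP_GPUT_ND), using t_stats_gput_prev as the
			 * reference.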
341 */ 342 if (tp->t_stats_gput_prev > 0) 343 stats_voi_update_abs_s32(tp->t_stats, 344 VOI_TCP_GPUT_ND, 345 ((gput - tp->t_stats_gput_prev) * 100) / 346 tp->t_stats_gput_prev); 347 tp->t_flags &= ~TF_GPUTINPROG; 348 tp->t_stats_gput_prev = gput; 349 } 350 #endif /* STATS */ 351 if (tp->snd_cwnd > tp->snd_ssthresh) { 352 tp->t_bytes_acked += tp->ccv->bytes_this_ack; 353 if (tp->t_bytes_acked >= tp->snd_cwnd) { 354 tp->t_bytes_acked -= tp->snd_cwnd; 355 tp->ccv->flags |= CCF_ABC_SENTAWND; 356 } 357 } else { 358 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 359 tp->t_bytes_acked = 0; 360 } 361 } 362 363 if (CC_ALGO(tp)->ack_received != NULL) { 364 /* XXXLAS: Find a way to live without this */ 365 tp->ccv->curack = th->th_ack; 366 CC_ALGO(tp)->ack_received(tp->ccv, type); 367 } 368 #ifdef STATS 369 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd); 370 #endif 371 } 372 373 void 374 cc_conn_init(struct tcpcb *tp) 375 { 376 struct hc_metrics_lite metrics; 377 struct inpcb *inp = tp->t_inpcb; 378 u_int maxseg; 379 int rtt; 380 381 INP_WLOCK_ASSERT(tp->t_inpcb); 382 383 tcp_hc_get(&inp->inp_inc, &metrics); 384 maxseg = tcp_maxseg(tp); 385 386 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 387 tp->t_srtt = rtt; 388 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 389 TCPSTAT_INC(tcps_usedrtt); 390 if (metrics.rmx_rttvar) { 391 tp->t_rttvar = metrics.rmx_rttvar; 392 TCPSTAT_INC(tcps_usedrttvar); 393 } else { 394 /* default variation is +- 1 rtt */ 395 tp->t_rttvar = 396 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 397 } 398 TCPT_RANGESET(tp->t_rxtcur, 399 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 400 tp->t_rttmin, TCPTV_REXMTMAX); 401 } 402 if (metrics.rmx_ssthresh) { 403 /* 404 * There's some sort of gateway or interface 405 * buffer limit on the path. Use this to set 406 * the slow start threshold, but set the 407 * threshold to no less than 2*mss. 408 */ 409 tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh); 410 TCPSTAT_INC(tcps_usedssthresh); 411 } 412 413 /* 414 * Set the initial slow-start flight size. 415 * 416 * If a SYN or SYN/ACK was lost and retransmitted, we have to 417 * reduce the initial CWND to one segment as congestion is likely 418 * requiring us to be cautious. 419 */ 420 if (tp->snd_cwnd == 1) 421 tp->snd_cwnd = maxseg; /* SYN(-ACK) lost */ 422 else 423 tp->snd_cwnd = tcp_compute_initwnd(maxseg); 424 425 if (CC_ALGO(tp)->conn_init != NULL) 426 CC_ALGO(tp)->conn_init(tp->ccv); 427 } 428 429 void inline 430 cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) 431 { 432 INP_WLOCK_ASSERT(tp->t_inpcb); 433 434 #ifdef STATS 435 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 436 #endif 437 438 switch(type) { 439 case CC_NDUPACK: 440 if (!IN_FASTRECOVERY(tp->t_flags)) { 441 tp->snd_recover = tp->snd_max; 442 if (tp->t_flags2 & TF2_ECN_PERMIT) 443 tp->t_flags2 |= TF2_ECN_SND_CWR; 444 } 445 break; 446 case CC_ECN: 447 if (!IN_CONGRECOVERY(tp->t_flags) || 448 /* 449 * Allow ECN reaction on ACK to CWR, if 450 * that data segment was also CE marked. 
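		     * (I.e., the ACK is at or beyond snd_recover, so the
		     * previous congestion-recovery episode has completed.)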
		     */
		    SEQ_GEQ(th->th_ack, tp->snd_recover)) {
			EXIT_CONGRECOVERY(tp->t_flags);
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max + 1;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		if (tp->t_flags2 & TF2_ECN_PERMIT)
			tp->t_flags2 |= TF2_ECN_SND_CWR;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * the following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
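 *	- Delayed ACKs are enabled (net.inet.tcp.delayed_ack), or we still
 *	  have a SYN to send (TF_NEEDSYN).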
513 */ 514 #define DELAY_ACK(tp, tlen) \ 515 ((!tcp_timer_active(tp, TT_DELACK) && \ 516 (tp->t_flags & TF_RXWIN0SENT) == 0) && \ 517 (tlen <= tp->t_maxseg) && \ 518 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN))) 519 520 void inline 521 cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos) 522 { 523 INP_WLOCK_ASSERT(tp->t_inpcb); 524 525 if (CC_ALGO(tp)->ecnpkt_handler != NULL) { 526 switch (iptos & IPTOS_ECN_MASK) { 527 case IPTOS_ECN_CE: 528 tp->ccv->flags |= CCF_IPHDR_CE; 529 break; 530 case IPTOS_ECN_ECT0: 531 /* FALLTHROUGH */ 532 case IPTOS_ECN_ECT1: 533 /* FALLTHROUGH */ 534 case IPTOS_ECN_NOTECT: 535 tp->ccv->flags &= ~CCF_IPHDR_CE; 536 break; 537 } 538 539 if (th->th_flags & TH_CWR) 540 tp->ccv->flags |= CCF_TCPHDR_CWR; 541 else 542 tp->ccv->flags &= ~CCF_TCPHDR_CWR; 543 544 CC_ALGO(tp)->ecnpkt_handler(tp->ccv); 545 546 if (tp->ccv->flags & CCF_ACKNOW) { 547 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 548 tp->t_flags |= TF_ACKNOW; 549 } 550 } 551 } 552 553 /* 554 * TCP input handling is split into multiple parts: 555 * tcp6_input is a thin wrapper around tcp_input for the extended 556 * ip6_protox[] call format in ip6_input 557 * tcp_input handles primary segment validation, inpcb lookup and 558 * SYN processing on listen sockets 559 * tcp_do_segment processes the ACK and text of the segment for 560 * establishing, established and closing connections 561 */ 562 #ifdef INET6 563 int 564 tcp6_input(struct mbuf **mp, int *offp, int proto) 565 { 566 struct mbuf *m; 567 struct in6_ifaddr *ia6; 568 struct ip6_hdr *ip6; 569 570 m = *mp; 571 if (m->m_len < *offp + sizeof(struct tcphdr)) { 572 m = m_pullup(m, *offp + sizeof(struct tcphdr)); 573 if (m == NULL) { 574 *mp = m; 575 TCPSTAT_INC(tcps_rcvshort); 576 return (IPPROTO_DONE); 577 } 578 } 579 580 /* 581 * draft-itojun-ipv6-tcp-to-anycast 582 * better place to put this in? 583 */ 584 ip6 = mtod(m, struct ip6_hdr *); 585 ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */); 586 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 587 ifa_free(&ia6->ia_ifa); 588 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 589 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6); 590 *mp = NULL; 591 return (IPPROTO_DONE); 592 } 593 if (ia6) 594 ifa_free(&ia6->ia_ifa); 595 596 *mp = m; 597 return (tcp_input(mp, offp, proto)); 598 } 599 #endif /* INET6 */ 600 601 int 602 tcp_input(struct mbuf **mp, int *offp, int proto) 603 { 604 struct mbuf *m = *mp; 605 struct tcphdr *th = NULL; 606 struct ip *ip = NULL; 607 struct inpcb *inp = NULL; 608 struct tcpcb *tp = NULL; 609 struct socket *so = NULL; 610 u_char *optp = NULL; 611 int off0; 612 int optlen = 0; 613 #ifdef INET 614 int len; 615 uint8_t ipttl; 616 #endif 617 int tlen = 0, off; 618 int drop_hdrlen; 619 int thflags; 620 int rstreason = 0; /* For badport_bandlim accounting purposes */ 621 uint8_t iptos; 622 struct m_tag *fwd_tag = NULL; 623 #ifdef INET6 624 struct ip6_hdr *ip6 = NULL; 625 int isipv6; 626 #else 627 const void *ip6 = NULL; 628 #endif /* INET6 */ 629 struct tcpopt to; /* options in this segment */ 630 char *s = NULL; /* address and port logging */ 631 #ifdef TCPDEBUG 632 /* 633 * The size of tcp_saveipgen must be the size of the max ip header, 634 * now IPv6. 635 */ 636 u_char tcp_saveipgen[IP6_HDR_LEN]; 637 struct tcphdr tcp_savetcp; 638 short ostate = 0; 639 #endif 640 641 NET_EPOCH_ASSERT(); 642 643 #ifdef INET6 644 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 
1 : 0; 645 #endif 646 647 off0 = *offp; 648 m = *mp; 649 *mp = NULL; 650 to.to_flags = 0; 651 TCPSTAT_INC(tcps_rcvtotal); 652 653 #ifdef INET6 654 if (isipv6) { 655 ip6 = mtod(m, struct ip6_hdr *); 656 th = (struct tcphdr *)((caddr_t)ip6 + off0); 657 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; 658 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) { 659 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 660 th->th_sum = m->m_pkthdr.csum_data; 661 else 662 th->th_sum = in6_cksum_pseudo(ip6, tlen, 663 IPPROTO_TCP, m->m_pkthdr.csum_data); 664 th->th_sum ^= 0xffff; 665 } else 666 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen); 667 if (th->th_sum) { 668 TCPSTAT_INC(tcps_rcvbadsum); 669 goto drop; 670 } 671 672 /* 673 * Be proactive about unspecified IPv6 address in source. 674 * As we use all-zero to indicate unbounded/unconnected pcb, 675 * unspecified IPv6 address can be used to confuse us. 676 * 677 * Note that packets with unspecified IPv6 destination is 678 * already dropped in ip6_input. 679 */ 680 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 681 /* XXX stat */ 682 goto drop; 683 } 684 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 685 } 686 #endif 687 #if defined(INET) && defined(INET6) 688 else 689 #endif 690 #ifdef INET 691 { 692 /* 693 * Get IP and TCP header together in first mbuf. 694 * Note: IP leaves IP header in first mbuf. 695 */ 696 if (off0 > sizeof (struct ip)) { 697 ip_stripoptions(m); 698 off0 = sizeof(struct ip); 699 } 700 if (m->m_len < sizeof (struct tcpiphdr)) { 701 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 702 == NULL) { 703 TCPSTAT_INC(tcps_rcvshort); 704 return (IPPROTO_DONE); 705 } 706 } 707 ip = mtod(m, struct ip *); 708 th = (struct tcphdr *)((caddr_t)ip + off0); 709 tlen = ntohs(ip->ip_len) - off0; 710 711 iptos = ip->ip_tos; 712 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 713 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 714 th->th_sum = m->m_pkthdr.csum_data; 715 else 716 th->th_sum = in_pseudo(ip->ip_src.s_addr, 717 ip->ip_dst.s_addr, 718 htonl(m->m_pkthdr.csum_data + tlen + 719 IPPROTO_TCP)); 720 th->th_sum ^= 0xffff; 721 } else { 722 struct ipovly *ipov = (struct ipovly *)ip; 723 724 /* 725 * Checksum extended TCP header and data. 726 */ 727 len = off0 + tlen; 728 ipttl = ip->ip_ttl; 729 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 730 ipov->ih_len = htons(tlen); 731 th->th_sum = in_cksum(m, len); 732 /* Reset length for SDT probes. */ 733 ip->ip_len = htons(len); 734 /* Reset TOS bits */ 735 ip->ip_tos = iptos; 736 /* Re-initialization for later version check */ 737 ip->ip_ttl = ipttl; 738 ip->ip_v = IPVERSION; 739 ip->ip_hl = off0 >> 2; 740 } 741 742 if (th->th_sum) { 743 TCPSTAT_INC(tcps_rcvbadsum); 744 goto drop; 745 } 746 } 747 #endif /* INET */ 748 749 /* 750 * Check that TCP offset makes sense, 751 * pull out TCP options and adjust length. 
XXX 752 */ 753 off = th->th_off << 2; 754 if (off < sizeof (struct tcphdr) || off > tlen) { 755 TCPSTAT_INC(tcps_rcvbadoff); 756 goto drop; 757 } 758 tlen -= off; /* tlen is used instead of ti->ti_len */ 759 if (off > sizeof (struct tcphdr)) { 760 #ifdef INET6 761 if (isipv6) { 762 if (m->m_len < off0 + off) { 763 m = m_pullup(m, off0 + off); 764 if (m == NULL) { 765 TCPSTAT_INC(tcps_rcvshort); 766 return (IPPROTO_DONE); 767 } 768 } 769 ip6 = mtod(m, struct ip6_hdr *); 770 th = (struct tcphdr *)((caddr_t)ip6 + off0); 771 } 772 #endif 773 #if defined(INET) && defined(INET6) 774 else 775 #endif 776 #ifdef INET 777 { 778 if (m->m_len < sizeof(struct ip) + off) { 779 if ((m = m_pullup(m, sizeof (struct ip) + off)) 780 == NULL) { 781 TCPSTAT_INC(tcps_rcvshort); 782 return (IPPROTO_DONE); 783 } 784 ip = mtod(m, struct ip *); 785 th = (struct tcphdr *)((caddr_t)ip + off0); 786 } 787 } 788 #endif 789 optlen = off - sizeof (struct tcphdr); 790 optp = (u_char *)(th + 1); 791 } 792 thflags = th->th_flags; 793 794 /* 795 * Convert TCP protocol specific fields to host format. 796 */ 797 tcp_fields_to_host(th); 798 799 /* 800 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 801 */ 802 drop_hdrlen = off0 + off; 803 804 /* 805 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 806 */ 807 if ( 808 #ifdef INET6 809 (isipv6 && (m->m_flags & M_IP6_NEXTHOP)) 810 #ifdef INET 811 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP)) 812 #endif 813 #endif 814 #if defined(INET) && !defined(INET6) 815 (m->m_flags & M_IP_NEXTHOP) 816 #endif 817 ) 818 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 819 820 findpcb: 821 #ifdef INET6 822 if (isipv6 && fwd_tag != NULL) { 823 struct sockaddr_in6 *next_hop6; 824 825 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1); 826 /* 827 * Transparently forwarded. Pretend to be the destination. 828 * Already got one like this? 829 */ 830 inp = in6_pcblookup_mbuf(&V_tcbinfo, 831 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport, 832 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m); 833 if (!inp) { 834 /* 835 * It's new. Try to find the ambushing socket. 836 * Because we've rewritten the destination address, 837 * any hardware-generated hash is ignored. 838 */ 839 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src, 840 th->th_sport, &next_hop6->sin6_addr, 841 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) : 842 th->th_dport, INPLOOKUP_WILDCARD | 843 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 844 } 845 } else if (isipv6) { 846 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src, 847 th->th_sport, &ip6->ip6_dst, th->th_dport, 848 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 849 m->m_pkthdr.rcvif, m); 850 } 851 #endif /* INET6 */ 852 #if defined(INET6) && defined(INET) 853 else 854 #endif 855 #ifdef INET 856 if (fwd_tag != NULL) { 857 struct sockaddr_in *next_hop; 858 859 next_hop = (struct sockaddr_in *)(fwd_tag+1); 860 /* 861 * Transparently forwarded. Pretend to be the destination. 862 * already got one like this? 863 */ 864 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport, 865 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB, 866 m->m_pkthdr.rcvif, m); 867 if (!inp) { 868 /* 869 * It's new. Try to find the ambushing socket. 870 * Because we've rewritten the destination address, 871 * any hardware-generated hash is ignored. 872 */ 873 inp = in_pcblookup(&V_tcbinfo, ip->ip_src, 874 th->th_sport, next_hop->sin_addr, 875 next_hop->sin_port ? 
ntohs(next_hop->sin_port) : 876 th->th_dport, INPLOOKUP_WILDCARD | 877 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 878 } 879 } else 880 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, 881 th->th_sport, ip->ip_dst, th->th_dport, 882 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 883 m->m_pkthdr.rcvif, m); 884 #endif /* INET */ 885 886 /* 887 * If the INPCB does not exist then all data in the incoming 888 * segment is discarded and an appropriate RST is sent back. 889 * XXX MRT Send RST using which routing table? 890 */ 891 if (inp == NULL) { 892 /* 893 * Log communication attempts to ports that are not 894 * in use. 895 */ 896 if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 897 V_tcp_log_in_vain == 2) { 898 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6))) 899 log(LOG_INFO, "%s; %s: Connection attempt " 900 "to closed port\n", s, __func__); 901 } 902 /* 903 * When blackholing do not respond with a RST but 904 * completely ignore the segment and drop it. 905 */ 906 if ((V_blackhole == 1 && (thflags & TH_SYN)) || 907 V_blackhole == 2) 908 goto dropunlock; 909 910 rstreason = BANDLIM_RST_CLOSEDPORT; 911 goto dropwithreset; 912 } 913 INP_WLOCK_ASSERT(inp); 914 /* 915 * While waiting for inp lock during the lookup, another thread 916 * can have dropped the inpcb, in which case we need to loop back 917 * and try to find a new inpcb to deliver to. 918 */ 919 if (inp->inp_flags & INP_DROPPED) { 920 INP_WUNLOCK(inp); 921 inp = NULL; 922 goto findpcb; 923 } 924 if ((inp->inp_flowtype == M_HASHTYPE_NONE) && 925 (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) && 926 ((inp->inp_socket == NULL) || 927 (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) { 928 inp->inp_flowid = m->m_pkthdr.flowid; 929 inp->inp_flowtype = M_HASHTYPE_GET(m); 930 } 931 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 932 #ifdef INET6 933 if (isipv6 && IPSEC_ENABLED(ipv6) && 934 IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) { 935 goto dropunlock; 936 } 937 #ifdef INET 938 else 939 #endif 940 #endif /* INET6 */ 941 #ifdef INET 942 if (IPSEC_ENABLED(ipv4) && 943 IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) { 944 goto dropunlock; 945 } 946 #endif /* INET */ 947 #endif /* IPSEC */ 948 949 /* 950 * Check the minimum TTL for socket. 951 */ 952 if (inp->inp_ip_minttl != 0) { 953 #ifdef INET6 954 if (isipv6) { 955 if (inp->inp_ip_minttl > ip6->ip6_hlim) 956 goto dropunlock; 957 } else 958 #endif 959 if (inp->inp_ip_minttl > ip->ip_ttl) 960 goto dropunlock; 961 } 962 963 /* 964 * A previous connection in TIMEWAIT state is supposed to catch stray 965 * or duplicate segments arriving late. If this segment was a 966 * legitimate new connection attempt, the old INPCB gets removed and 967 * we can try again to find a listening socket. 968 * 969 * At this point, due to earlier optimism, we may hold only an inpcb 970 * lock, and not the inpcbinfo write lock. If so, we need to try to 971 * acquire it, or if that fails, acquire a reference on the inpcb, 972 * drop all locks, acquire a global write lock, and then re-acquire 973 * the inpcb lock. We may at that point discover that another thread 974 * has tried to free the inpcb, in which case we need to loop back 975 * and try to find a new inpcb to deliver to. 976 * 977 * XXXRW: It may be time to rethink timewait locking. 978 */ 979 if (inp->inp_flags & INP_TIMEWAIT) { 980 tcp_dooptions(&to, optp, optlen, 981 (thflags & TH_SYN) ? TO_SYN : 0); 982 /* 983 * NB: tcp_twcheck unlocks the INP and frees the mbuf. 
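	 * A non-zero return from tcp_twcheck() means the TIME_WAIT
	 * connection was removed to make way for a legitimate new
	 * connection attempt, so we loop back to findpcb to search for
	 * a listening socket.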
984 */ 985 if (tcp_twcheck(inp, &to, th, m, tlen)) 986 goto findpcb; 987 return (IPPROTO_DONE); 988 } 989 /* 990 * The TCPCB may no longer exist if the connection is winding 991 * down or it is in the CLOSED state. Either way we drop the 992 * segment and send an appropriate response. 993 */ 994 tp = intotcpcb(inp); 995 if (tp == NULL || tp->t_state == TCPS_CLOSED) { 996 rstreason = BANDLIM_RST_CLOSEDPORT; 997 goto dropwithreset; 998 } 999 1000 #ifdef TCP_OFFLOAD 1001 if (tp->t_flags & TF_TOE) { 1002 tcp_offload_input(tp, m); 1003 m = NULL; /* consumed by the TOE driver */ 1004 goto dropunlock; 1005 } 1006 #endif 1007 1008 #ifdef MAC 1009 INP_WLOCK_ASSERT(inp); 1010 if (mac_inpcb_check_deliver(inp, m)) 1011 goto dropunlock; 1012 #endif 1013 so = inp->inp_socket; 1014 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 1015 #ifdef TCPDEBUG 1016 if (so->so_options & SO_DEBUG) { 1017 ostate = tp->t_state; 1018 #ifdef INET6 1019 if (isipv6) { 1020 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 1021 } else 1022 #endif 1023 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 1024 tcp_savetcp = *th; 1025 } 1026 #endif /* TCPDEBUG */ 1027 /* 1028 * When the socket is accepting connections (the INPCB is in LISTEN 1029 * state) we look into the SYN cache if this is a new connection 1030 * attempt or the completion of a previous one. 1031 */ 1032 KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN), 1033 ("%s: so accepting but tp %p not listening", __func__, tp)); 1034 if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) { 1035 struct in_conninfo inc; 1036 1037 bzero(&inc, sizeof(inc)); 1038 #ifdef INET6 1039 if (isipv6) { 1040 inc.inc_flags |= INC_ISIPV6; 1041 if (inp->inp_inc.inc_flags & INC_IPV6MINMTU) 1042 inc.inc_flags |= INC_IPV6MINMTU; 1043 inc.inc6_faddr = ip6->ip6_src; 1044 inc.inc6_laddr = ip6->ip6_dst; 1045 } else 1046 #endif 1047 { 1048 inc.inc_faddr = ip->ip_src; 1049 inc.inc_laddr = ip->ip_dst; 1050 } 1051 inc.inc_fport = th->th_sport; 1052 inc.inc_lport = th->th_dport; 1053 inc.inc_fibnum = so->so_fibnum; 1054 1055 /* 1056 * Check for an existing connection attempt in syncache if 1057 * the flag is only ACK. A successful lookup creates a new 1058 * socket appended to the listen queue in SYN_RECEIVED state. 1059 */ 1060 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 1061 /* 1062 * Parse the TCP options here because 1063 * syncookies need access to the reflected 1064 * timestamp. 1065 */ 1066 tcp_dooptions(&to, optp, optlen, 0); 1067 /* 1068 * NB: syncache_expand() doesn't unlock 1069 * inp and tcpinfo locks. 1070 */ 1071 rstreason = syncache_expand(&inc, &to, th, &so, m); 1072 if (rstreason < 0) { 1073 /* 1074 * A failing TCP MD5 signature comparison 1075 * must result in the segment being dropped 1076 * and must not produce any response back 1077 * to the sender. 1078 */ 1079 goto dropunlock; 1080 } else if (rstreason == 0) { 1081 /* 1082 * No syncache entry or ACK was not 1083 * for our SYN/ACK. Send a RST. 1084 * NB: syncache did its own logging 1085 * of the failure cause. 1086 */ 1087 rstreason = BANDLIM_RST_OPENPORT; 1088 goto dropwithreset; 1089 } 1090 tfo_socket_result: 1091 if (so == NULL) { 1092 /* 1093 * We completed the 3-way handshake 1094 * but could not allocate a socket 1095 * either due to memory shortage, 1096 * listen queue length limits or 1097 * global socket limits. Send RST 1098 * or wait and have the remote end 1099 * retransmit the ACK for another 1100 * try. 
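				 * Whether we send a RST or silently drop the
				 * segment is controlled by
				 * V_tcp_sc_rst_sock_fail, checked below.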
1101 */ 1102 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1103 log(LOG_DEBUG, "%s; %s: Listen socket: " 1104 "Socket allocation failed due to " 1105 "limits or memory shortage, %s\n", 1106 s, __func__, 1107 V_tcp_sc_rst_sock_fail ? 1108 "sending RST" : "try again"); 1109 if (V_tcp_sc_rst_sock_fail) { 1110 rstreason = BANDLIM_UNLIMITED; 1111 goto dropwithreset; 1112 } else 1113 goto dropunlock; 1114 } 1115 /* 1116 * Socket is created in state SYN_RECEIVED. 1117 * Unlock the listen socket, lock the newly 1118 * created socket and update the tp variable. 1119 */ 1120 INP_WUNLOCK(inp); /* listen socket */ 1121 inp = sotoinpcb(so); 1122 /* 1123 * New connection inpcb is already locked by 1124 * syncache_expand(). 1125 */ 1126 INP_WLOCK_ASSERT(inp); 1127 tp = intotcpcb(inp); 1128 KASSERT(tp->t_state == TCPS_SYN_RECEIVED, 1129 ("%s: ", __func__)); 1130 /* 1131 * Process the segment and the data it 1132 * contains. tcp_do_segment() consumes 1133 * the mbuf chain and unlocks the inpcb. 1134 */ 1135 TCP_PROBE5(receive, NULL, tp, m, tp, th); 1136 tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, 1137 iptos); 1138 return (IPPROTO_DONE); 1139 } 1140 /* 1141 * Segment flag validation for new connection attempts: 1142 * 1143 * Our (SYN|ACK) response was rejected. 1144 * Check with syncache and remove entry to prevent 1145 * retransmits. 1146 * 1147 * NB: syncache_chkrst does its own logging of failure 1148 * causes. 1149 */ 1150 if (thflags & TH_RST) { 1151 syncache_chkrst(&inc, th, m); 1152 goto dropunlock; 1153 } 1154 /* 1155 * We can't do anything without SYN. 1156 */ 1157 if ((thflags & TH_SYN) == 0) { 1158 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1159 log(LOG_DEBUG, "%s; %s: Listen socket: " 1160 "SYN is missing, segment ignored\n", 1161 s, __func__); 1162 TCPSTAT_INC(tcps_badsyn); 1163 goto dropunlock; 1164 } 1165 /* 1166 * (SYN|ACK) is bogus on a listen socket. 1167 */ 1168 if (thflags & TH_ACK) { 1169 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1170 log(LOG_DEBUG, "%s; %s: Listen socket: " 1171 "SYN|ACK invalid, segment rejected\n", 1172 s, __func__); 1173 syncache_badack(&inc); /* XXX: Not needed! */ 1174 TCPSTAT_INC(tcps_badsyn); 1175 rstreason = BANDLIM_RST_OPENPORT; 1176 goto dropwithreset; 1177 } 1178 /* 1179 * If the drop_synfin option is enabled, drop all 1180 * segments with both the SYN and FIN bits set. 1181 * This prevents e.g. nmap from identifying the 1182 * TCP/IP stack. 1183 * XXX: Poor reasoning. nmap has other methods 1184 * and is constantly refining its stack detection 1185 * strategies. 1186 * XXX: This is a violation of the TCP specification 1187 * and was used by RFC1644. 1188 */ 1189 if ((thflags & TH_FIN) && V_drop_synfin) { 1190 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1191 log(LOG_DEBUG, "%s; %s: Listen socket: " 1192 "SYN|FIN segment ignored (based on " 1193 "sysctl setting)\n", s, __func__); 1194 TCPSTAT_INC(tcps_badsyn); 1195 goto dropunlock; 1196 } 1197 /* 1198 * Segment's flags are (SYN) or (SYN|FIN). 1199 * 1200 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored 1201 * as they do not affect the state of the TCP FSM. 1202 * The data pointed to by TH_URG and th_urp is ignored. 
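		 * Once the sanity checks below pass, the SYN is handed to
		 * syncache_add(), which creates compressed per-connection
		 * state instead of a full socket.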
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden, we do not accept
		 * a SYN sent to a deprecated interface address, to prevent
		 * any new inbound connection from getting established.
		 * When we do not accept the SYN, we send a TCP RST with the
		 * deprecated source address (instead of silently dropping
		 * the segment).  This is a compromise: it is much better for
		 * the peer to receive a RST, since the RST will be the final
		 * packet of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC 2462 does not suggest dropping a
		 * SYN in this case.
		 * Reading RFC 2462 5.5.4 closely, it says:
		 * 1. use of a deprecated address with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into the (2b) case, as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC 2462 is confusing, and there are
		 * multiple descriptions of deprecated address handling -
		 * worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
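		 *   Segments failing these checks are silently dropped
		 *	rather than answered with a RST.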
1268 */ 1269 if (m->m_flags & (M_BCAST|M_MCAST)) { 1270 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1271 log(LOG_DEBUG, "%s; %s: Listen socket: " 1272 "Connection attempt from broad- or multicast " 1273 "link layer address ignored\n", s, __func__); 1274 goto dropunlock; 1275 } 1276 #ifdef INET6 1277 if (isipv6) { 1278 if (th->th_dport == th->th_sport && 1279 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { 1280 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1281 log(LOG_DEBUG, "%s; %s: Listen socket: " 1282 "Connection attempt to/from self " 1283 "ignored\n", s, __func__); 1284 goto dropunlock; 1285 } 1286 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1287 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { 1288 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1289 log(LOG_DEBUG, "%s; %s: Listen socket: " 1290 "Connection attempt from/to multicast " 1291 "address ignored\n", s, __func__); 1292 goto dropunlock; 1293 } 1294 } 1295 #endif 1296 #if defined(INET) && defined(INET6) 1297 else 1298 #endif 1299 #ifdef INET 1300 { 1301 if (th->th_dport == th->th_sport && 1302 ip->ip_dst.s_addr == ip->ip_src.s_addr) { 1303 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1304 log(LOG_DEBUG, "%s; %s: Listen socket: " 1305 "Connection attempt from/to self " 1306 "ignored\n", s, __func__); 1307 goto dropunlock; 1308 } 1309 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1310 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1311 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1312 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { 1313 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1314 log(LOG_DEBUG, "%s; %s: Listen socket: " 1315 "Connection attempt from/to broad- " 1316 "or multicast address ignored\n", 1317 s, __func__); 1318 goto dropunlock; 1319 } 1320 } 1321 #endif 1322 /* 1323 * SYN appears to be valid. Create compressed TCP state 1324 * for syncache. 1325 */ 1326 #ifdef TCPDEBUG 1327 if (so->so_options & SO_DEBUG) 1328 tcp_trace(TA_INPUT, ostate, tp, 1329 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1330 #endif 1331 TCP_PROBE3(debug__input, tp, th, m); 1332 tcp_dooptions(&to, optp, optlen, TO_SYN); 1333 if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL, iptos)) 1334 goto tfo_socket_result; 1335 1336 /* 1337 * Entry added to syncache and mbuf consumed. 1338 * Only the listen socket is unlocked by syncache_add(). 1339 */ 1340 INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo); 1341 return (IPPROTO_DONE); 1342 } else if (tp->t_state == TCPS_LISTEN) { 1343 /* 1344 * When a listen socket is torn down the SO_ACCEPTCONN 1345 * flag is removed first while connections are drained 1346 * from the accept queue in a unlock/lock cycle of the 1347 * ACCEPT_LOCK, opening a race condition allowing a SYN 1348 * attempt go through unhandled. 1349 */ 1350 goto dropunlock; 1351 } 1352 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 1353 if (tp->t_flags & TF_SIGNATURE) { 1354 tcp_dooptions(&to, optp, optlen, thflags); 1355 if ((to.to_flags & TOF_SIGNATURE) == 0) { 1356 TCPSTAT_INC(tcps_sig_err_nosigopt); 1357 goto dropunlock; 1358 } 1359 if (!TCPMD5_ENABLED() || 1360 TCPMD5_INPUT(m, th, to.to_signature) != 0) 1361 goto dropunlock; 1362 } 1363 #endif 1364 TCP_PROBE5(receive, NULL, tp, m, tp, th); 1365 1366 /* 1367 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later 1368 * state. tcp_do_segment() always consumes the mbuf chain, unlocks 1369 * the inpcb, and unlocks pcbinfo. 
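	 * The call below goes through the connection's TCP function block
	 * (t_fb), so alternate TCP stacks are dispatched here as well.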
	 */
	tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * Automatic sizing of the receive socket buffer.  Often the send
 * buffer size is not optimally adjusted to the actual network
 * conditions at hand (delay bandwidth product).  Setting the
 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (eg. trans-continental/oceanic links).
 *
 * On the receive side the socket buffer memory is only rarely
 * used to any significant extent.  This allows us to be much
 * more aggressive in scaling the receive socket buffer.  For
 * the case that the buffer space is actually used to a large
 * extent and we run out of kernel memory we can simply drop
 * the new segments; TCP on the sender will just retransmit them
 * later.  Setting the buffer size too big may only consume too
 * much kernel memory if the application doesn't read() from
 * the socket, or if packet loss or reordering makes use of the
 * reassembly queue.
 *
 * The criteria to step up the receive buffer one notch are:
 *  1. Application has not set receive buffer size with
 *     SO_RCVBUF.  Setting SO_RCVBUF clears SB_AUTOSIZE.
 *  2. the number of bytes received during 1/2 of an sRTT
 *     is at least 3/8 of the current socket buffer size.
 *  3. receive buffer size has not hit maximal automatic size;
 *
 * If all of the criteria are met we increase the socket buffer
 * by 1/2 (bounded by the max).  This allows us to keep ahead
 * of slow-start, and also prevents our peer from ever being
 * limited by our rwnd, which we would then have to open up in
 * one go, causing a burst.
 *
 * This algorithm does at most two steps per RTT, and only if
 * we receive a bulk stream w/o packet losses or reorderings.
 * Shrinking the buffer during idle times is not necessary as
 * it doesn't consume any memory when idle.
 *
 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
	int newsize = 0;

	if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
	    tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
	    TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
	    ((tp->t_srtt >> TCP_RTT_SHIFT)/2)) {
		if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2)/ 4 * 3) &&
		    so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
			newsize = min((so->so_rcv.sb_hiwat +
			    (so->so_rcv.sb_hiwat/2)), V_tcp_autorcvbuf_max);
		}
		TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

		/* Start over with next RTT.
*/ 1456 tp->rfbuf_ts = 0; 1457 tp->rfbuf_cnt = 0; 1458 } else { 1459 tp->rfbuf_cnt += tlen; /* add up */ 1460 } 1461 return (newsize); 1462 } 1463 1464 void 1465 tcp_handle_wakeup(struct tcpcb *tp, struct socket *so) 1466 { 1467 /* 1468 * Since tp might be gone if the session entered 1469 * the TIME_WAIT state before coming here, we need 1470 * to check if the socket is still connected. 1471 */ 1472 if ((so->so_state & SS_ISCONNECTED) == 0) 1473 return; 1474 INP_LOCK_ASSERT(tp->t_inpcb); 1475 if (tp->t_flags & TF_WAKESOR) { 1476 tp->t_flags &= ~TF_WAKESOR; 1477 SOCKBUF_UNLOCK_ASSERT(&so->so_rcv); 1478 sorwakeup(so); 1479 } 1480 if (tp->t_flags & TF_WAKESOW) { 1481 tp->t_flags &= ~TF_WAKESOW; 1482 SOCKBUF_UNLOCK_ASSERT(&so->so_snd); 1483 sowwakeup(so); 1484 } 1485 } 1486 1487 void 1488 tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 1489 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos) 1490 { 1491 int thflags, acked, ourfinisacked, needoutput = 0, sack_changed; 1492 int rstreason, todrop, win, incforsyn = 0; 1493 uint32_t tiwin; 1494 uint16_t nsegs; 1495 char *s; 1496 struct in_conninfo *inc; 1497 struct mbuf *mfree; 1498 struct tcpopt to; 1499 int tfo_syn; 1500 1501 #ifdef TCPDEBUG 1502 /* 1503 * The size of tcp_saveipgen must be the size of the max ip header, 1504 * now IPv6. 1505 */ 1506 u_char tcp_saveipgen[IP6_HDR_LEN]; 1507 struct tcphdr tcp_savetcp; 1508 short ostate = 0; 1509 #endif 1510 thflags = th->th_flags; 1511 inc = &tp->t_inpcb->inp_inc; 1512 tp->sackhint.last_sack_ack = 0; 1513 sack_changed = 0; 1514 nsegs = max(1, m->m_pkthdr.lro_nsegs); 1515 1516 NET_EPOCH_ASSERT(); 1517 INP_WLOCK_ASSERT(tp->t_inpcb); 1518 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 1519 __func__)); 1520 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 1521 __func__)); 1522 1523 #ifdef TCPPCAP 1524 /* Save segment, if requested. */ 1525 tcp_pcap_add(th, m, &(tp->t_inpkts)); 1526 #endif 1527 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 1528 tlen, NULL, true); 1529 1530 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 1531 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 1532 log(LOG_DEBUG, "%s; %s: " 1533 "SYN|FIN segment ignored (based on " 1534 "sysctl setting)\n", s, __func__); 1535 free(s, M_TCPLOG); 1536 } 1537 goto drop; 1538 } 1539 1540 /* 1541 * If a segment with the ACK-bit set arrives in the SYN-SENT state 1542 * check SEQ.ACK first. 1543 */ 1544 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 1545 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 1546 rstreason = BANDLIM_UNLIMITED; 1547 goto dropwithreset; 1548 } 1549 1550 /* 1551 * Segment received on connection. 1552 * Reset idle time and keep-alive timer. 1553 * XXX: This should be done after segment 1554 * validation to ignore broken/spoofed segs. 1555 */ 1556 tp->t_rcvtime = ticks; 1557 1558 /* 1559 * Scale up the window into a 32-bit value. 1560 * For the SYN_SENT state the scale is zero. 1561 */ 1562 tiwin = th->th_win << tp->snd_scale; 1563 #ifdef STATS 1564 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 1565 #endif 1566 1567 /* 1568 * TCP ECN processing. 
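	 * A CE mark on the IP header sets TF2_ECN_SND_ECE so that ECE is
	 * echoed on our ACKs; a CWR from the peer clears it again.  An ECE
	 * from the peer is handled as a congestion signal (CC_ECN).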
1569 */ 1570 if (tp->t_flags2 & TF2_ECN_PERMIT) { 1571 if (thflags & TH_CWR) { 1572 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 1573 tp->t_flags |= TF_ACKNOW; 1574 } 1575 switch (iptos & IPTOS_ECN_MASK) { 1576 case IPTOS_ECN_CE: 1577 tp->t_flags2 |= TF2_ECN_SND_ECE; 1578 TCPSTAT_INC(tcps_ecn_ce); 1579 break; 1580 case IPTOS_ECN_ECT0: 1581 TCPSTAT_INC(tcps_ecn_ect0); 1582 break; 1583 case IPTOS_ECN_ECT1: 1584 TCPSTAT_INC(tcps_ecn_ect1); 1585 break; 1586 } 1587 1588 /* Process a packet differently from RFC3168. */ 1589 cc_ecnpkt_handler(tp, th, iptos); 1590 1591 /* Congestion experienced. */ 1592 if (thflags & TH_ECE) { 1593 cc_cong_signal(tp, th, CC_ECN); 1594 } 1595 } 1596 1597 /* 1598 * Parse options on any incoming segment. 1599 */ 1600 tcp_dooptions(&to, (u_char *)(th + 1), 1601 (th->th_off << 2) - sizeof(struct tcphdr), 1602 (thflags & TH_SYN) ? TO_SYN : 0); 1603 1604 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 1605 if ((tp->t_flags & TF_SIGNATURE) != 0 && 1606 (to.to_flags & TOF_SIGNATURE) == 0) { 1607 TCPSTAT_INC(tcps_sig_err_sigopt); 1608 /* XXX: should drop? */ 1609 } 1610 #endif 1611 /* 1612 * If echoed timestamp is later than the current time, 1613 * fall back to non RFC1323 RTT calculation. Normalize 1614 * timestamp if syncookies were used when this connection 1615 * was established. 1616 */ 1617 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 1618 to.to_tsecr -= tp->ts_offset; 1619 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks())) 1620 to.to_tsecr = 0; 1621 else if (tp->t_flags & TF_PREVVALID && 1622 tp->t_badrxtwin != 0 && SEQ_LT(to.to_tsecr, tp->t_badrxtwin)) 1623 cc_cong_signal(tp, th, CC_RTO_ERR); 1624 } 1625 /* 1626 * Process options only when we get SYN/ACK back. The SYN case 1627 * for incoming connections is handled in tcp_syncache. 1628 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1629 * or <SYN,ACK>) segment itself is never scaled. 1630 * XXX this is traditional behavior, may need to be cleaned up. 1631 */ 1632 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1633 /* Handle parallel SYN for ECN */ 1634 if (!(thflags & TH_ACK) && 1635 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) && 1636 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) { 1637 tp->t_flags2 |= TF2_ECN_PERMIT; 1638 tp->t_flags2 |= TF2_ECN_SND_ECE; 1639 TCPSTAT_INC(tcps_ecn_shs); 1640 } 1641 if ((to.to_flags & TOF_SCALE) && 1642 (tp->t_flags & TF_REQ_SCALE)) { 1643 tp->t_flags |= TF_RCVD_SCALE; 1644 tp->snd_scale = to.to_wscale; 1645 } else 1646 tp->t_flags &= ~TF_REQ_SCALE; 1647 /* 1648 * Initial send window. It will be updated with 1649 * the next incoming segment to the scaled value. 
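		 * (Per RFC 1323 the window carried in a SYN segment is never
		 * scaled, so th_win is taken as-is here.)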
1650 */ 1651 tp->snd_wnd = th->th_win; 1652 if ((to.to_flags & TOF_TS) && 1653 (tp->t_flags & TF_REQ_TSTMP)) { 1654 tp->t_flags |= TF_RCVD_TSTMP; 1655 tp->ts_recent = to.to_tsval; 1656 tp->ts_recent_age = tcp_ts_getticks(); 1657 } else 1658 tp->t_flags &= ~TF_REQ_TSTMP; 1659 if (to.to_flags & TOF_MSS) 1660 tcp_mss(tp, to.to_mss); 1661 if ((tp->t_flags & TF_SACK_PERMIT) && 1662 (to.to_flags & TOF_SACKPERM) == 0) 1663 tp->t_flags &= ~TF_SACK_PERMIT; 1664 if (IS_FASTOPEN(tp->t_flags)) { 1665 if (to.to_flags & TOF_FASTOPEN) { 1666 uint16_t mss; 1667 1668 if (to.to_flags & TOF_MSS) 1669 mss = to.to_mss; 1670 else 1671 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 1672 mss = TCP6_MSS; 1673 else 1674 mss = TCP_MSS; 1675 tcp_fastopen_update_cache(tp, mss, 1676 to.to_tfo_len, to.to_tfo_cookie); 1677 } else 1678 tcp_fastopen_disable_path(tp); 1679 } 1680 } 1681 1682 /* 1683 * If timestamps were negotiated during SYN/ACK and a 1684 * segment without a timestamp is received, silently drop 1685 * the segment. 1686 * See section 3.2 of RFC 7323. 1687 */ 1688 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) { 1689 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 1690 log(LOG_DEBUG, "%s; %s: Timestamp missing, " 1691 "segment silently dropped\n", s, __func__); 1692 free(s, M_TCPLOG); 1693 } 1694 goto drop; 1695 } 1696 /* 1697 * If timestamps were not negotiated during SYN/ACK and a 1698 * segment without a timestamp is received, ignore the 1699 * timestamp and process the packet normally. 1700 * See section 3.2 of RFC 7323. 1701 */ 1702 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) { 1703 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 1704 log(LOG_DEBUG, "%s; %s: Timestamp not expected, " 1705 "segment processed normally\n", s, __func__); 1706 free(s, M_TCPLOG); 1707 } 1708 } 1709 1710 /* 1711 * Header prediction: check for the two common cases 1712 * of a uni-directional data xfer. If the packet has 1713 * no control flags, is in-sequence, the window didn't 1714 * change and we're not retransmitting, it's a 1715 * candidate. If the length is zero and the ack moved 1716 * forward, we're the sender side of the xfer. Just 1717 * free the data acked & wake any higher level process 1718 * that was blocked waiting for space. If the length 1719 * is non-zero and the ack didn't move, we're the 1720 * receiver side. If we're getting packets in-order 1721 * (the reassembly queue is empty), add the data to 1722 * the socket buffer and note that we need a delayed ack. 1723 * Make sure that the hidden state-flags are also off. 1724 * Since we check for TCPS_ESTABLISHED first, it can only 1725 * be TH_NEEDSYN. 1726 */ 1727 if (tp->t_state == TCPS_ESTABLISHED && 1728 th->th_seq == tp->rcv_nxt && 1729 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1730 tp->snd_nxt == tp->snd_max && 1731 tiwin && tiwin == tp->snd_wnd && 1732 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1733 SEGQ_EMPTY(tp) && 1734 ((to.to_flags & TOF_TS) == 0 || 1735 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1736 /* 1737 * If last ACK falls within this segment's sequence numbers, 1738 * record the timestamp. 1739 * NOTE that the test is modified according to the latest 1740 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 
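		 * That is, ts_recent is only updated when th_seq does not
		 * advance past last_ack_sent, as checked below.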
1741 */ 1742 if ((to.to_flags & TOF_TS) != 0 && 1743 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1744 tp->ts_recent_age = tcp_ts_getticks(); 1745 tp->ts_recent = to.to_tsval; 1746 } 1747 1748 if (tlen == 0) { 1749 if (SEQ_GT(th->th_ack, tp->snd_una) && 1750 SEQ_LEQ(th->th_ack, tp->snd_max) && 1751 !IN_RECOVERY(tp->t_flags) && 1752 (to.to_flags & TOF_SACK) == 0 && 1753 TAILQ_EMPTY(&tp->snd_holes)) { 1754 /* 1755 * This is a pure ack for outstanding data. 1756 */ 1757 TCPSTAT_INC(tcps_predack); 1758 1759 /* 1760 * "bad retransmit" recovery without timestamps. 1761 */ 1762 if ((to.to_flags & TOF_TS) == 0 && 1763 tp->t_rxtshift == 1 && 1764 tp->t_flags & TF_PREVVALID && 1765 (int)(ticks - tp->t_badrxtwin) < 0) { 1766 cc_cong_signal(tp, th, CC_RTO_ERR); 1767 } 1768 1769 /* 1770 * Recalculate the transmit timer / rtt. 1771 * 1772 * Some boxes send broken timestamp replies 1773 * during the SYN+ACK phase, ignore 1774 * timestamps of 0 or we could calculate a 1775 * huge RTT and blow up the retransmit timer. 1776 */ 1777 if ((to.to_flags & TOF_TS) != 0 && 1778 to.to_tsecr) { 1779 uint32_t t; 1780 1781 t = tcp_ts_getticks() - to.to_tsecr; 1782 if (!tp->t_rttlow || tp->t_rttlow > t) 1783 tp->t_rttlow = t; 1784 tcp_xmit_timer(tp, 1785 TCP_TS_TO_TICKS(t) + 1); 1786 } else if (tp->t_rtttime && 1787 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1788 if (!tp->t_rttlow || 1789 tp->t_rttlow > ticks - tp->t_rtttime) 1790 tp->t_rttlow = ticks - tp->t_rtttime; 1791 tcp_xmit_timer(tp, 1792 ticks - tp->t_rtttime); 1793 } 1794 acked = BYTES_THIS_ACK(tp, th); 1795 1796 #ifdef TCP_HHOOK 1797 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 1798 hhook_run_tcp_est_in(tp, th, &to); 1799 #endif 1800 1801 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 1802 TCPSTAT_ADD(tcps_rcvackbyte, acked); 1803 sbdrop(&so->so_snd, acked); 1804 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1805 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1806 tp->snd_recover = th->th_ack - 1; 1807 1808 /* 1809 * Let the congestion control algorithm update 1810 * congestion control related information. This 1811 * typically means increasing the congestion 1812 * window. 1813 */ 1814 cc_ack_received(tp, th, nsegs, CC_ACK); 1815 1816 tp->snd_una = th->th_ack; 1817 /* 1818 * Pull snd_wl2 up to prevent seq wrap relative 1819 * to th_ack. 1820 */ 1821 tp->snd_wl2 = th->th_ack; 1822 tp->t_dupacks = 0; 1823 m_freem(m); 1824 1825 /* 1826 * If all outstanding data are acked, stop 1827 * retransmit timer, otherwise restart timer 1828 * using current (possibly backed-off) value. 1829 * If process is waiting for space, 1830 * wakeup/selwakeup/signal. If data 1831 * are ready to send, let tcp_output 1832 * decide between more output or persist. 1833 */ 1834 #ifdef TCPDEBUG 1835 if (so->so_options & SO_DEBUG) 1836 tcp_trace(TA_INPUT, ostate, tp, 1837 (void *)tcp_saveipgen, 1838 &tcp_savetcp, 0); 1839 #endif 1840 TCP_PROBE3(debug__input, tp, th, m); 1841 if (tp->snd_una == tp->snd_max) 1842 tcp_timer_activate(tp, TT_REXMT, 0); 1843 else if (!tcp_timer_active(tp, TT_PERSIST)) 1844 tcp_timer_activate(tp, TT_REXMT, 1845 tp->t_rxtcur); 1846 tp->t_flags |= TF_WAKESOW; 1847 if (sbavail(&so->so_snd)) 1848 (void) tp->t_fb->tfb_tcp_output(tp); 1849 goto check_delack; 1850 } 1851 } else if (th->th_ack == tp->snd_una && 1852 tlen <= sbspace(&so->so_rcv)) { 1853 int newsize = 0; /* automatic sockbuf scaling */ 1854 1855 /* 1856 * This is a pure, in-sequence data packet with 1857 * nothing on the reassembly queue and we have enough 1858 * buffer space to take it. 
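			 * The fast path below advances rcv_nxt, appends the
			 * data to the socket receive buffer (growing it via
			 * tcp_autorcvbuf() if allowed), and schedules either
			 * a delayed or an immediate ACK.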
1859 */ 1860 /* Clean receiver SACK report if present */ 1861 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1862 tcp_clean_sackreport(tp); 1863 TCPSTAT_INC(tcps_preddat); 1864 tp->rcv_nxt += tlen; 1865 if (tlen && 1866 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 1867 (tp->t_fbyte_in == 0)) { 1868 tp->t_fbyte_in = ticks; 1869 if (tp->t_fbyte_in == 0) 1870 tp->t_fbyte_in = 1; 1871 if (tp->t_fbyte_out && tp->t_fbyte_in) 1872 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 1873 } 1874 /* 1875 * Pull snd_wl1 up to prevent seq wrap relative to 1876 * th_seq. 1877 */ 1878 tp->snd_wl1 = th->th_seq; 1879 /* 1880 * Pull rcv_up up to prevent seq wrap relative to 1881 * rcv_nxt. 1882 */ 1883 tp->rcv_up = tp->rcv_nxt; 1884 TCPSTAT_ADD(tcps_rcvpack, nsegs); 1885 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1886 #ifdef TCPDEBUG 1887 if (so->so_options & SO_DEBUG) 1888 tcp_trace(TA_INPUT, ostate, tp, 1889 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1890 #endif 1891 TCP_PROBE3(debug__input, tp, th, m); 1892 1893 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 1894 1895 /* Add data to socket buffer. */ 1896 SOCKBUF_LOCK(&so->so_rcv); 1897 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1898 m_freem(m); 1899 } else { 1900 /* 1901 * Set new socket buffer size. 1902 * Give up when limit is reached. 1903 */ 1904 if (newsize) 1905 if (!sbreserve_locked(&so->so_rcv, 1906 newsize, so, NULL)) 1907 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1908 m_adj(m, drop_hdrlen); /* delayed header drop */ 1909 sbappendstream_locked(&so->so_rcv, m, 0); 1910 } 1911 SOCKBUF_UNLOCK(&so->so_rcv); 1912 tp->t_flags |= TF_WAKESOR; 1913 if (DELAY_ACK(tp, tlen)) { 1914 tp->t_flags |= TF_DELACK; 1915 } else { 1916 tp->t_flags |= TF_ACKNOW; 1917 tp->t_fb->tfb_tcp_output(tp); 1918 } 1919 goto check_delack; 1920 } 1921 } 1922 1923 /* 1924 * Calculate amount of space in receive window, 1925 * and then do TCP input processing. 1926 * Receive window is amount of space in rcv queue, 1927 * but not less than advertised window. 1928 */ 1929 win = sbspace(&so->so_rcv); 1930 if (win < 0) 1931 win = 0; 1932 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1933 1934 switch (tp->t_state) { 1935 /* 1936 * If the state is SYN_RECEIVED: 1937 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1938 */ 1939 case TCPS_SYN_RECEIVED: 1940 if ((thflags & TH_ACK) && 1941 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1942 SEQ_GT(th->th_ack, tp->snd_max))) { 1943 rstreason = BANDLIM_RST_OPENPORT; 1944 goto dropwithreset; 1945 } 1946 if (IS_FASTOPEN(tp->t_flags)) { 1947 /* 1948 * When a TFO connection is in SYN_RECEIVED, the 1949 * only valid packets are the initial SYN, a 1950 * retransmit/copy of the initial SYN (possibly with 1951 * a subset of the original data), a valid ACK, a 1952 * FIN, or a RST. 1953 */ 1954 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1955 rstreason = BANDLIM_RST_OPENPORT; 1956 goto dropwithreset; 1957 } else if (thflags & TH_SYN) { 1958 /* non-initial SYN is ignored */ 1959 if ((tcp_timer_active(tp, TT_DELACK) || 1960 tcp_timer_active(tp, TT_REXMT))) 1961 goto drop; 1962 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1963 goto drop; 1964 } 1965 } 1966 break; 1967 1968 /* 1969 * If the state is SYN_SENT: 1970 * if seg contains a RST with valid ACK (SEQ.ACK has already 1971 * been verified), then drop the connection. 1972 * if seg contains a RST without an ACK, drop the seg. 1973 * if seg does not contain SYN, then drop the seg. 
1974 * Otherwise this is an acceptable SYN segment 1975 * initialize tp->rcv_nxt and tp->irs 1976 * if seg contains ack then advance tp->snd_una 1977 * if seg contains an ECE and ECN support is enabled, the stream 1978 * is ECN capable. 1979 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1980 * arrange for segment to be acked (eventually) 1981 * continue processing rest of data/controls, beginning with URG 1982 */ 1983 case TCPS_SYN_SENT: 1984 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1985 TCP_PROBE5(connect__refused, NULL, tp, 1986 m, tp, th); 1987 tp = tcp_drop(tp, ECONNREFUSED); 1988 } 1989 if (thflags & TH_RST) 1990 goto drop; 1991 if (!(thflags & TH_SYN)) 1992 goto drop; 1993 1994 tp->irs = th->th_seq; 1995 tcp_rcvseqinit(tp); 1996 if (thflags & TH_ACK) { 1997 int tfo_partial_ack = 0; 1998 1999 TCPSTAT_INC(tcps_connects); 2000 soisconnected(so); 2001 #ifdef MAC 2002 mac_socketpeer_set_from_mbuf(m, so); 2003 #endif 2004 /* Do window scaling on this connection? */ 2005 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2006 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2007 tp->rcv_scale = tp->request_r_scale; 2008 } 2009 tp->rcv_adv += min(tp->rcv_wnd, 2010 TCP_MAXWIN << tp->rcv_scale); 2011 tp->snd_una++; /* SYN is acked */ 2012 /* 2013 * If not all the data that was sent in the TFO SYN 2014 * has been acked, resend the remainder right away. 2015 */ 2016 if (IS_FASTOPEN(tp->t_flags) && 2017 (tp->snd_una != tp->snd_max)) { 2018 tp->snd_nxt = th->th_ack; 2019 tfo_partial_ack = 1; 2020 } 2021 /* 2022 * If there's data, delay ACK; if there's also a FIN 2023 * ACKNOW will be turned on later. 2024 */ 2025 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack) 2026 tcp_timer_activate(tp, TT_DELACK, 2027 tcp_delacktime); 2028 else 2029 tp->t_flags |= TF_ACKNOW; 2030 2031 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 2032 (V_tcp_do_ecn == 1)) { 2033 tp->t_flags2 |= TF2_ECN_PERMIT; 2034 TCPSTAT_INC(tcps_ecn_shs); 2035 } 2036 2037 /* 2038 * Received <SYN,ACK> in SYN_SENT[*] state. 2039 * Transitions: 2040 * SYN_SENT --> ESTABLISHED 2041 * SYN_SENT* --> FIN_WAIT_1 2042 */ 2043 tp->t_starttime = ticks; 2044 if (tp->t_flags & TF_NEEDFIN) { 2045 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2046 tp->t_flags &= ~TF_NEEDFIN; 2047 thflags &= ~TH_SYN; 2048 } else { 2049 tcp_state_change(tp, TCPS_ESTABLISHED); 2050 TCP_PROBE5(connect__established, NULL, tp, 2051 m, tp, th); 2052 cc_conn_init(tp); 2053 tcp_timer_activate(tp, TT_KEEP, 2054 TP_KEEPIDLE(tp)); 2055 } 2056 } else { 2057 /* 2058 * Received initial SYN in SYN-SENT[*] state => 2059 * simultaneous open. 2060 * If it succeeds, connection is * half-synchronized. 2061 * Otherwise, do 3-way handshake: 2062 * SYN-SENT -> SYN-RECEIVED 2063 * SYN-SENT* -> SYN-RECEIVED* 2064 */ 2065 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 2066 tcp_timer_activate(tp, TT_REXMT, 0); 2067 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2068 } 2069 2070 INP_WLOCK_ASSERT(tp->t_inpcb); 2071 2072 /* 2073 * Advance th->th_seq to correspond to first data byte. 2074 * If data, trim to stay within window, 2075 * dropping FIN if necessary. 2076 */ 2077 th->th_seq++; 2078 if (tlen > tp->rcv_wnd) { 2079 todrop = tlen - tp->rcv_wnd; 2080 m_adj(m, -todrop); 2081 tlen = tp->rcv_wnd; 2082 thflags &= ~TH_FIN; 2083 TCPSTAT_INC(tcps_rcvpackafterwin); 2084 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2085 } 2086 tp->snd_wl1 = th->th_seq - 1; 2087 tp->rcv_up = th->th_seq; 2088 /* 2089 * Client side of transaction: already sent SYN and data. 
2090 * If the remote host used T/TCP to validate the SYN, 2091 * our data will be ACK'd; if so, enter normal data segment 2092 * processing in the middle of step 5, ack processing. 2093 * Otherwise, goto step 6. 2094 */ 2095 if (thflags & TH_ACK) 2096 goto process_ACK; 2097 2098 goto step6; 2099 2100 /* 2101 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2102 * do normal processing. 2103 * 2104 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2105 */ 2106 case TCPS_LAST_ACK: 2107 case TCPS_CLOSING: 2108 break; /* continue normal processing */ 2109 } 2110 2111 /* 2112 * States other than LISTEN or SYN_SENT. 2113 * First check the RST flag and sequence number since reset segments 2114 * are exempt from the timestamp and connection count tests. This 2115 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2116 * below which allowed reset segments in half the sequence space 2117 * to fall though and be processed (which gives forged reset 2118 * segments with a random sequence number a 50 percent chance of 2119 * killing a connection). 2120 * Then check timestamp, if present. 2121 * Then check the connection count, if present. 2122 * Then check that at least some bytes of segment are within 2123 * receive window. If segment begins before rcv_nxt, 2124 * drop leading data (and SYN); if nothing left, just ack. 2125 */ 2126 if (thflags & TH_RST) { 2127 /* 2128 * RFC5961 Section 3.2 2129 * 2130 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2131 * - If RST is in window, we send challenge ACK. 2132 * 2133 * Note: to take into account delayed ACKs, we should 2134 * test against last_ack_sent instead of rcv_nxt. 2135 * Note 2: we handle special case of closed window, not 2136 * covered by the RFC. 2137 */ 2138 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2139 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2140 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2141 KASSERT(tp->t_state != TCPS_SYN_SENT, 2142 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2143 __func__, th, tp)); 2144 2145 if (V_tcp_insecure_rst || 2146 tp->last_ack_sent == th->th_seq) { 2147 TCPSTAT_INC(tcps_drops); 2148 /* Drop the connection. */ 2149 switch (tp->t_state) { 2150 case TCPS_SYN_RECEIVED: 2151 so->so_error = ECONNREFUSED; 2152 goto close; 2153 case TCPS_ESTABLISHED: 2154 case TCPS_FIN_WAIT_1: 2155 case TCPS_FIN_WAIT_2: 2156 case TCPS_CLOSE_WAIT: 2157 case TCPS_CLOSING: 2158 case TCPS_LAST_ACK: 2159 so->so_error = ECONNRESET; 2160 close: 2161 /* FALLTHROUGH */ 2162 default: 2163 tp = tcp_close(tp); 2164 } 2165 } else { 2166 TCPSTAT_INC(tcps_badrst); 2167 /* Send challenge ACK. */ 2168 tcp_respond(tp, mtod(m, void *), th, m, 2169 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2170 tp->last_ack_sent = tp->rcv_nxt; 2171 m = NULL; 2172 } 2173 } 2174 goto drop; 2175 } 2176 2177 /* 2178 * RFC5961 Section 4.2 2179 * Send challenge ACK for any SYN in synchronized state. 2180 */ 2181 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT && 2182 tp->t_state != TCPS_SYN_RECEIVED) { 2183 TCPSTAT_INC(tcps_badsyn); 2184 if (V_tcp_insecure_syn && 2185 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2186 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2187 tp = tcp_drop(tp, ECONNRESET); 2188 rstreason = BANDLIM_UNLIMITED; 2189 } else { 2190 /* Send challenge ACK. 
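 * (Editorial note, not part of the original file: per RFC 5961 the
 * challenge ACK below simply re-states our current view of the
 * connection -- it is built with
 * tcp_respond(..., tp->rcv_nxt, tp->snd_nxt, TH_ACK) -- so a peer
 * that really did lose state will answer with an exact-sequence RST
 * that is then accepted, while a blind attacker guessing in-window
 * sequence numbers gains nothing.)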
*/ 2191 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2192 tp->snd_nxt, TH_ACK); 2193 tp->last_ack_sent = tp->rcv_nxt; 2194 m = NULL; 2195 } 2196 goto drop; 2197 } 2198 2199 /* 2200 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2201 * and it's less than ts_recent, drop it. 2202 */ 2203 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2204 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2205 /* Check to see if ts_recent is over 24 days old. */ 2206 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2207 /* 2208 * Invalidate ts_recent. If this segment updates 2209 * ts_recent, the age will be reset later and ts_recent 2210 * will get a valid value. If it does not, setting 2211 * ts_recent to zero will at least satisfy the 2212 * requirement that zero be placed in the timestamp 2213 * echo reply when ts_recent isn't valid. The 2214 * age isn't reset until we get a valid ts_recent 2215 * because we don't want out-of-order segments to be 2216 * dropped when ts_recent is old. 2217 */ 2218 tp->ts_recent = 0; 2219 } else { 2220 TCPSTAT_INC(tcps_rcvduppack); 2221 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2222 TCPSTAT_INC(tcps_pawsdrop); 2223 if (tlen) 2224 goto dropafterack; 2225 goto drop; 2226 } 2227 } 2228 2229 /* 2230 * In the SYN-RECEIVED state, validate that the packet belongs to 2231 * this connection before trimming the data to fit the receive 2232 * window. Check the sequence number versus IRS since we know 2233 * the sequence numbers haven't wrapped. This is a partial fix 2234 * for the "LAND" DoS attack. 2235 */ 2236 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2237 rstreason = BANDLIM_RST_OPENPORT; 2238 goto dropwithreset; 2239 } 2240 2241 todrop = tp->rcv_nxt - th->th_seq; 2242 if (todrop > 0) { 2243 if (thflags & TH_SYN) { 2244 thflags &= ~TH_SYN; 2245 th->th_seq++; 2246 if (th->th_urp > 1) 2247 th->th_urp--; 2248 else 2249 thflags &= ~TH_URG; 2250 todrop--; 2251 } 2252 /* 2253 * Following if statement from Stevens, vol. 2, p. 960. 2254 */ 2255 if (todrop > tlen 2256 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2257 /* 2258 * Any valid FIN must be to the left of the window. 2259 * At this point the FIN must be a duplicate or out 2260 * of sequence; drop it. 2261 */ 2262 thflags &= ~TH_FIN; 2263 2264 /* 2265 * Send an ACK to resynchronize and drop any data. 2266 * But keep on processing for RST or ACK. 2267 */ 2268 tp->t_flags |= TF_ACKNOW; 2269 todrop = tlen; 2270 TCPSTAT_INC(tcps_rcvduppack); 2271 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2272 } else { 2273 TCPSTAT_INC(tcps_rcvpartduppack); 2274 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2275 } 2276 /* 2277 * DSACK - add SACK block for dropped range 2278 */ 2279 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) { 2280 tcp_update_sack_list(tp, th->th_seq, 2281 th->th_seq + todrop); 2282 /* 2283 * ACK now, as the next in-sequence segment 2284 * will clear the DSACK block again 2285 */ 2286 tp->t_flags |= TF_ACKNOW; 2287 } 2288 drop_hdrlen += todrop; /* drop from the top afterwards */ 2289 th->th_seq += todrop; 2290 tlen -= todrop; 2291 if (th->th_urp > todrop) 2292 th->th_urp -= todrop; 2293 else { 2294 thflags &= ~TH_URG; 2295 th->th_urp = 0; 2296 } 2297 } 2298 2299 /* 2300 * If new data are received on a connection after the 2301 * user processes are gone, then RST the other end. 
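 * (Editorial note, not part of the original file: SS_NOFDREF means
 * no file descriptor references the socket any more, i.e. the local
 * process has closed it.  Payload arriving in a state beyond
 * CLOSE_WAIT in that situation can never be delivered, so the
 * connection is torn down and answered with a RST, as the code
 * below does.)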
2302 */ 2303 if ((so->so_state & SS_NOFDREF) && 2304 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2305 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2306 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2307 "after socket was closed, " 2308 "sending RST and removing tcpcb\n", 2309 s, __func__, tcpstates[tp->t_state], tlen); 2310 free(s, M_TCPLOG); 2311 } 2312 tp = tcp_close(tp); 2313 TCPSTAT_INC(tcps_rcvafterclose); 2314 rstreason = BANDLIM_UNLIMITED; 2315 goto dropwithreset; 2316 } 2317 2318 /* 2319 * If segment ends after window, drop trailing data 2320 * (and PUSH and FIN); if nothing left, just ACK. 2321 */ 2322 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2323 if (todrop > 0) { 2324 TCPSTAT_INC(tcps_rcvpackafterwin); 2325 if (todrop >= tlen) { 2326 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2327 /* 2328 * If window is closed can only take segments at 2329 * window edge, and have to drop data and PUSH from 2330 * incoming segments. Continue processing, but 2331 * remember to ack. Otherwise, drop segment 2332 * and ack. 2333 */ 2334 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2335 tp->t_flags |= TF_ACKNOW; 2336 TCPSTAT_INC(tcps_rcvwinprobe); 2337 } else 2338 goto dropafterack; 2339 } else 2340 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2341 m_adj(m, -todrop); 2342 tlen -= todrop; 2343 thflags &= ~(TH_PUSH|TH_FIN); 2344 } 2345 2346 /* 2347 * If last ACK falls within this segment's sequence numbers, 2348 * record its timestamp. 2349 * NOTE: 2350 * 1) That the test incorporates suggestions from the latest 2351 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2352 * 2) That updating only on newer timestamps interferes with 2353 * our earlier PAWS tests, so this check should be solely 2354 * predicated on the sequence space of this segment. 2355 * 3) That we modify the segment boundary check to be 2356 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2357 * instead of RFC1323's 2358 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2359 * This modified check allows us to overcome RFC1323's 2360 * limitations as described in Stevens TCP/IP Illustrated 2361 * Vol. 2 p.869. In such cases, we can still calculate the 2362 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2363 */ 2364 if ((to.to_flags & TOF_TS) != 0 && 2365 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2366 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2367 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2368 tp->ts_recent_age = tcp_ts_getticks(); 2369 tp->ts_recent = to.to_tsval; 2370 } 2371 2372 /* 2373 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2374 * flag is on (half-synchronized state), then queue data for 2375 * later processing; else drop segment and return. 2376 */ 2377 if ((thflags & TH_ACK) == 0) { 2378 if (tp->t_state == TCPS_SYN_RECEIVED || 2379 (tp->t_flags & TF_NEEDSYN)) { 2380 if (tp->t_state == TCPS_SYN_RECEIVED && 2381 IS_FASTOPEN(tp->t_flags)) { 2382 tp->snd_wnd = tiwin; 2383 cc_conn_init(tp); 2384 } 2385 goto step6; 2386 } else if (tp->t_flags & TF_ACKNOW) 2387 goto dropafterack; 2388 else 2389 goto drop; 2390 } 2391 2392 /* 2393 * Ack processing. 2394 */ 2395 switch (tp->t_state) { 2396 /* 2397 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2398 * ESTABLISHED state and continue processing. 2399 * The ACK was checked above. 2400 */ 2401 case TCPS_SYN_RECEIVED: 2402 2403 TCPSTAT_INC(tcps_connects); 2404 soisconnected(so); 2405 /* Do window scaling? 
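 * (Editorial note, not part of the original file: scaling is only
 * applied when both sides sent the option, i.e. TF_RCVD_SCALE and
 * TF_REQ_SCALE are both set.  As an illustration, with
 * request_r_scale == 7 a raw 16-bit window field of 1024 stands for
 * 1024 << 7 == 131072 bytes, and the largest usable window becomes
 * TCP_MAXWIN << 7, roughly 8 MB.)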
*/ 2406 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2407 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2408 tp->rcv_scale = tp->request_r_scale; 2409 } 2410 tp->snd_wnd = tiwin; 2411 /* 2412 * Make transitions: 2413 * SYN-RECEIVED -> ESTABLISHED 2414 * SYN-RECEIVED* -> FIN-WAIT-1 2415 */ 2416 tp->t_starttime = ticks; 2417 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 2418 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2419 tp->t_tfo_pending = NULL; 2420 } 2421 if (tp->t_flags & TF_NEEDFIN) { 2422 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2423 tp->t_flags &= ~TF_NEEDFIN; 2424 } else { 2425 tcp_state_change(tp, TCPS_ESTABLISHED); 2426 TCP_PROBE5(accept__established, NULL, tp, 2427 m, tp, th); 2428 /* 2429 * TFO connections call cc_conn_init() during SYN 2430 * processing. Calling it again here for such 2431 * connections is not harmless as it would undo the 2432 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2433 * is retransmitted. 2434 */ 2435 if (!IS_FASTOPEN(tp->t_flags)) 2436 cc_conn_init(tp); 2437 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2438 } 2439 /* 2440 * Account for the ACK of our SYN prior to 2441 * regular ACK processing below, except for 2442 * simultaneous SYN, which is handled later. 2443 */ 2444 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 2445 incforsyn = 1; 2446 /* 2447 * If segment contains data or ACK, will call tcp_reass() 2448 * later; if not, do so now to pass queued data to user. 2449 */ 2450 if (tlen == 0 && (thflags & TH_FIN) == 0) 2451 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 2452 (struct mbuf *)0); 2453 tp->snd_wl1 = th->th_seq - 1; 2454 /* FALLTHROUGH */ 2455 2456 /* 2457 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2458 * ACKs. If the ack is in the range 2459 * tp->snd_una < th->th_ack <= tp->snd_max 2460 * then advance tp->snd_una to th->th_ack and drop 2461 * data from the retransmission queue. If this ACK reflects 2462 * more up to date window information we update our window information. 2463 */ 2464 case TCPS_ESTABLISHED: 2465 case TCPS_FIN_WAIT_1: 2466 case TCPS_FIN_WAIT_2: 2467 case TCPS_CLOSE_WAIT: 2468 case TCPS_CLOSING: 2469 case TCPS_LAST_ACK: 2470 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2471 TCPSTAT_INC(tcps_rcvacktoomuch); 2472 goto dropafterack; 2473 } 2474 if ((tp->t_flags & TF_SACK_PERMIT) && 2475 ((to.to_flags & TOF_SACK) || 2476 !TAILQ_EMPTY(&tp->snd_holes))) 2477 sack_changed = tcp_sack_doack(tp, &to, th->th_ack); 2478 else 2479 /* 2480 * Reset the value so that previous (valid) value 2481 * from the last ack with SACK doesn't get used. 2482 */ 2483 tp->sackhint.sacked_bytes = 0; 2484 2485 #ifdef TCP_HHOOK 2486 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2487 hhook_run_tcp_est_in(tp, th, &to); 2488 #endif 2489 2490 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2491 u_int maxseg; 2492 2493 maxseg = tcp_maxseg(tp); 2494 if (tlen == 0 && 2495 (tiwin == tp->snd_wnd || 2496 (tp->t_flags & TF_SACK_PERMIT))) { 2497 /* 2498 * If this is the first time we've seen a 2499 * FIN from the remote, this is not a 2500 * duplicate and it needs to be processed 2501 * normally. This happens during a 2502 * simultaneous close. 
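 * (Editorial note, not part of the original file:
 * TCPS_HAVERCVDFIN() reports whether a FIN has already been taken
 * into account for the current state, so a FIN-bearing ACK with an
 * otherwise unchanged window is deliberately not counted as a
 * duplicate here; t_dupacks is reset and the code breaks out so the
 * FIN is handled further below.)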
2503 */ 2504 if ((thflags & TH_FIN) && 2505 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2506 tp->t_dupacks = 0; 2507 break; 2508 } 2509 TCPSTAT_INC(tcps_rcvdupack); 2510 /* 2511 * If we have outstanding data (other than 2512 * a window probe), this is a completely 2513 * duplicate ack (ie, window info didn't 2514 * change and FIN isn't set), 2515 * the ack is the biggest we've 2516 * seen and we've seen exactly our rexmt 2517 * threshold of them, assume a packet 2518 * has been dropped and retransmit it. 2519 * Kludge snd_nxt & the congestion 2520 * window so we send only this one 2521 * packet. 2522 * 2523 * We know we're losing at the current 2524 * window size so do congestion avoidance 2525 * (set ssthresh to half the current window 2526 * and pull our congestion window back to 2527 * the new ssthresh). 2528 * 2529 * Dup acks mean that packets have left the 2530 * network (they're now cached at the receiver) 2531 * so bump cwnd by the amount in the receiver 2532 * to keep a constant cwnd packets in the 2533 * network. 2534 * 2535 * When using TCP ECN, notify the peer that 2536 * we reduced the cwnd. 2537 */ 2538 /* 2539 * Following 2 kinds of acks should not affect 2540 * dupack counting: 2541 * 1) Old acks 2542 * 2) Acks with SACK but without any new SACK 2543 * information in them. These could result from 2544 * any anomaly in the network like a switch 2545 * duplicating packets or a possible DoS attack. 2546 */ 2547 if (th->th_ack != tp->snd_una || 2548 ((tp->t_flags & TF_SACK_PERMIT) && 2549 !sack_changed)) 2550 break; 2551 else if (!tcp_timer_active(tp, TT_REXMT)) 2552 tp->t_dupacks = 0; 2553 else if (++tp->t_dupacks > tcprexmtthresh || 2554 IN_FASTRECOVERY(tp->t_flags)) { 2555 cc_ack_received(tp, th, nsegs, 2556 CC_DUPACK); 2557 if ((tp->t_flags & TF_SACK_PERMIT) && 2558 IN_FASTRECOVERY(tp->t_flags)) { 2559 int awnd; 2560 2561 /* 2562 * Compute the amount of data in flight first. 2563 * We can inject new data into the pipe iff 2564 * we have less than 1/2 the original window's 2565 * worth of data in flight. 2566 */ 2567 if (V_tcp_do_rfc6675_pipe) 2568 awnd = tcp_compute_pipe(tp); 2569 else 2570 awnd = (tp->snd_nxt - tp->snd_fack) + 2571 tp->sackhint.sack_bytes_rexmit; 2572 2573 if (awnd < tp->snd_ssthresh) { 2574 tp->snd_cwnd += maxseg; 2575 if (tp->snd_cwnd > tp->snd_ssthresh) 2576 tp->snd_cwnd = tp->snd_ssthresh; 2577 } 2578 } else 2579 tp->snd_cwnd += maxseg; 2580 (void) tp->t_fb->tfb_tcp_output(tp); 2581 goto drop; 2582 } else if (tp->t_dupacks == tcprexmtthresh) { 2583 tcp_seq onxt = tp->snd_nxt; 2584 2585 /* 2586 * If we're doing sack, check to 2587 * see if we're already in sack 2588 * recovery. If we're not doing sack, 2589 * check to see if we're in newreno 2590 * recovery. 2591 */ 2592 if (tp->t_flags & TF_SACK_PERMIT) { 2593 if (IN_FASTRECOVERY(tp->t_flags)) { 2594 tp->t_dupacks = 0; 2595 break; 2596 } 2597 } else { 2598 if (SEQ_LEQ(th->th_ack, 2599 tp->snd_recover)) { 2600 tp->t_dupacks = 0; 2601 break; 2602 } 2603 } 2604 /* Congestion signal before ack. 
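 * (Editorial note, not part of the original file: a rough worked
 * example of the non-SACK branch below, assuming the congestion
 * control module halves the window: with maxseg == 1460 and
 * cc_cong_signal() leaving snd_ssthresh == 14600, snd_cwnd is first
 * forced to one segment so tcp_output() retransmits only the
 * missing segment, and is then re-inflated to
 * snd_ssthresh + maxseg * (t_dupacks - snd_limited), e.g.
 * 14600 + 1460 * (3 - 2) == 16060 when limited transmit already
 * sent two segments.)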
*/ 2605 cc_cong_signal(tp, th, CC_NDUPACK); 2606 cc_ack_received(tp, th, nsegs, 2607 CC_DUPACK); 2608 tcp_timer_activate(tp, TT_REXMT, 0); 2609 tp->t_rtttime = 0; 2610 if (tp->t_flags & TF_SACK_PERMIT) { 2611 TCPSTAT_INC( 2612 tcps_sack_recovery_episode); 2613 tp->snd_recover = tp->snd_nxt; 2614 tp->snd_cwnd = maxseg; 2615 (void) tp->t_fb->tfb_tcp_output(tp); 2616 goto drop; 2617 } 2618 tp->snd_nxt = th->th_ack; 2619 tp->snd_cwnd = maxseg; 2620 (void) tp->t_fb->tfb_tcp_output(tp); 2621 KASSERT(tp->snd_limited <= 2, 2622 ("%s: tp->snd_limited too big", 2623 __func__)); 2624 tp->snd_cwnd = tp->snd_ssthresh + 2625 maxseg * 2626 (tp->t_dupacks - tp->snd_limited); 2627 if (SEQ_GT(onxt, tp->snd_nxt)) 2628 tp->snd_nxt = onxt; 2629 goto drop; 2630 } else if (V_tcp_do_rfc3042) { 2631 /* 2632 * Process first and second duplicate 2633 * ACKs. Each indicates a segment 2634 * leaving the network, creating room 2635 * for more. Make sure we can send a 2636 * packet on reception of each duplicate 2637 * ACK by increasing snd_cwnd by one 2638 * segment. Restore the original 2639 * snd_cwnd after packet transmission. 2640 */ 2641 cc_ack_received(tp, th, nsegs, 2642 CC_DUPACK); 2643 uint32_t oldcwnd = tp->snd_cwnd; 2644 tcp_seq oldsndmax = tp->snd_max; 2645 u_int sent; 2646 int avail; 2647 2648 KASSERT(tp->t_dupacks == 1 || 2649 tp->t_dupacks == 2, 2650 ("%s: dupacks not 1 or 2", 2651 __func__)); 2652 if (tp->t_dupacks == 1) 2653 tp->snd_limited = 0; 2654 tp->snd_cwnd = 2655 (tp->snd_nxt - tp->snd_una) + 2656 (tp->t_dupacks - tp->snd_limited) * 2657 maxseg; 2658 /* 2659 * Only call tcp_output when there 2660 * is new data available to be sent. 2661 * Otherwise we would send pure ACKs. 2662 */ 2663 SOCKBUF_LOCK(&so->so_snd); 2664 avail = sbavail(&so->so_snd) - 2665 (tp->snd_nxt - tp->snd_una); 2666 SOCKBUF_UNLOCK(&so->so_snd); 2667 if (avail > 0) 2668 (void) tp->t_fb->tfb_tcp_output(tp); 2669 sent = tp->snd_max - oldsndmax; 2670 if (sent > maxseg) { 2671 KASSERT((tp->t_dupacks == 2 && 2672 tp->snd_limited == 0) || 2673 (sent == maxseg + 1 && 2674 tp->t_flags & TF_SENTFIN), 2675 ("%s: sent too much", 2676 __func__)); 2677 tp->snd_limited = 2; 2678 } else if (sent > 0) 2679 ++tp->snd_limited; 2680 tp->snd_cwnd = oldcwnd; 2681 goto drop; 2682 } 2683 } 2684 break; 2685 } else { 2686 /* 2687 * This ack is advancing the left edge, reset the 2688 * counter. 2689 */ 2690 tp->t_dupacks = 0; 2691 /* 2692 * If this ack also has new SACK info, increment the 2693 * counter as per rfc6675. The variable 2694 * sack_changed tracks all changes to the SACK 2695 * scoreboard, including when partial ACKs without 2696 * SACK options are received, and clear the scoreboard 2697 * from the left side. Such partial ACKs should not be 2698 * counted as dupacks here. 2699 */ 2700 if ((tp->t_flags & TF_SACK_PERMIT) && 2701 (to.to_flags & TOF_SACK) && 2702 sack_changed) 2703 tp->t_dupacks++; 2704 } 2705 2706 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2707 ("%s: th_ack <= snd_una", __func__)); 2708 2709 /* 2710 * If the congestion window was inflated to account 2711 * for the other side's cached packets, retract it. 2712 */ 2713 if (IN_FASTRECOVERY(tp->t_flags)) { 2714 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2715 if (tp->t_flags & TF_SACK_PERMIT) 2716 tcp_sack_partialack(tp, th); 2717 else 2718 tcp_newreno_partial_ack(tp, th); 2719 } else 2720 cc_post_recovery(tp, th); 2721 } 2722 /* 2723 * If we reach this point, ACK is not a duplicate, 2724 * i.e., it ACKs something we sent. 
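 * (Editorial note, not part of the original file: process_ACK below
 * is also entered via goto from the SYN_SENT case when the ACK
 * covers data carried on the SYN; in that path the SYN itself has
 * already been accounted for by the snd_una++ done at connection
 * establishment, which is why the incforsyn adjustment below is
 * only needed for the SYN_RECEIVED case.)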
2725 */ 2726 if (tp->t_flags & TF_NEEDSYN) { 2727 /* 2728 * T/TCP: Connection was half-synchronized, and our 2729 * SYN has been ACK'd (so connection is now fully 2730 * synchronized). Go to non-starred state, 2731 * increment snd_una for ACK of SYN, and check if 2732 * we can do window scaling. 2733 */ 2734 tp->t_flags &= ~TF_NEEDSYN; 2735 tp->snd_una++; 2736 /* Do window scaling? */ 2737 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2738 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2739 tp->rcv_scale = tp->request_r_scale; 2740 /* Send window already scaled. */ 2741 } 2742 } 2743 2744 process_ACK: 2745 INP_WLOCK_ASSERT(tp->t_inpcb); 2746 2747 /* 2748 * Adjust for the SYN bit in sequence space, 2749 * but don't account for it in cwnd calculations. 2750 * This is for the SYN_RECEIVED, non-simultaneous 2751 * SYN case. SYN_SENT and simultaneous SYN are 2752 * treated elsewhere. 2753 */ 2754 if (incforsyn) 2755 tp->snd_una++; 2756 acked = BYTES_THIS_ACK(tp, th); 2757 KASSERT(acked >= 0, ("%s: acked unexepectedly negative " 2758 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__, 2759 tp->snd_una, th->th_ack, tp, m)); 2760 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 2761 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2762 2763 /* 2764 * If we just performed our first retransmit, and the ACK 2765 * arrives within our recovery window, then it was a mistake 2766 * to do the retransmit in the first place. Recover our 2767 * original cwnd and ssthresh, and proceed to transmit where 2768 * we left off. 2769 */ 2770 if (tp->t_rxtshift == 1 && 2771 tp->t_flags & TF_PREVVALID && 2772 tp->t_badrxtwin && 2773 SEQ_LT(to.to_tsecr, tp->t_badrxtwin)) 2774 cc_cong_signal(tp, th, CC_RTO_ERR); 2775 2776 /* 2777 * If we have a timestamp reply, update smoothed 2778 * round trip time. If no timestamp is present but 2779 * transmit timer is running and timed sequence 2780 * number was acked, update smoothed round trip time. 2781 * Since we now have an rtt measurement, cancel the 2782 * timer backoff (cf., Phil Karn's retransmit alg.). 2783 * Recompute the initial retransmit timer. 2784 * 2785 * Some boxes send broken timestamp replies 2786 * during the SYN+ACK phase, ignore 2787 * timestamps of 0 or we could calculate a 2788 * huge RTT and blow up the retransmit timer. 2789 */ 2790 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2791 uint32_t t; 2792 2793 t = tcp_ts_getticks() - to.to_tsecr; 2794 if (!tp->t_rttlow || tp->t_rttlow > t) 2795 tp->t_rttlow = t; 2796 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2797 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2798 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2799 tp->t_rttlow = ticks - tp->t_rtttime; 2800 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2801 } 2802 2803 /* 2804 * If all outstanding data is acked, stop retransmit 2805 * timer and remember to restart (more output or persist). 2806 * If there is more data to be acked, restart retransmit 2807 * timer, using current (possibly backed-off) value. 2808 */ 2809 if (th->th_ack == tp->snd_max) { 2810 tcp_timer_activate(tp, TT_REXMT, 0); 2811 needoutput = 1; 2812 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2813 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2814 2815 /* 2816 * If no data (only SYN) was ACK'd, 2817 * skip rest of ACK processing. 2818 */ 2819 if (acked == 0) 2820 goto step6; 2821 2822 /* 2823 * Let the congestion control algorithm update congestion 2824 * control related information. This typically means increasing 2825 * the congestion window. 
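 * (Editorial note, not part of the original file: the exact policy
 * lives in the pluggable modules under netinet/cc/.  For a
 * NewReno-style module this is roughly one maxseg of growth per ACK
 * while snd_cwnd < snd_ssthresh (slow start) and about
 * maxseg * maxseg / snd_cwnd per ACK afterwards (congestion
 * avoidance), but other modules may behave differently.)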
2826 */ 2827 cc_ack_received(tp, th, nsegs, CC_ACK); 2828 2829 SOCKBUF_LOCK(&so->so_snd); 2830 if (acked > sbavail(&so->so_snd)) { 2831 if (tp->snd_wnd >= sbavail(&so->so_snd)) 2832 tp->snd_wnd -= sbavail(&so->so_snd); 2833 else 2834 tp->snd_wnd = 0; 2835 mfree = sbcut_locked(&so->so_snd, 2836 (int)sbavail(&so->so_snd)); 2837 ourfinisacked = 1; 2838 } else { 2839 mfree = sbcut_locked(&so->so_snd, acked); 2840 if (tp->snd_wnd >= (uint32_t) acked) 2841 tp->snd_wnd -= acked; 2842 else 2843 tp->snd_wnd = 0; 2844 ourfinisacked = 0; 2845 } 2846 SOCKBUF_UNLOCK(&so->so_snd); 2847 tp->t_flags |= TF_WAKESOW; 2848 m_freem(mfree); 2849 /* Detect una wraparound. */ 2850 if (!IN_RECOVERY(tp->t_flags) && 2851 SEQ_GT(tp->snd_una, tp->snd_recover) && 2852 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2853 tp->snd_recover = th->th_ack - 1; 2854 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2855 if (IN_RECOVERY(tp->t_flags) && 2856 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2857 EXIT_RECOVERY(tp->t_flags); 2858 } 2859 tp->snd_una = th->th_ack; 2860 if (tp->t_flags & TF_SACK_PERMIT) { 2861 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2862 tp->snd_recover = tp->snd_una; 2863 } 2864 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2865 tp->snd_nxt = tp->snd_una; 2866 2867 switch (tp->t_state) { 2868 /* 2869 * In FIN_WAIT_1 STATE in addition to the processing 2870 * for the ESTABLISHED state if our FIN is now acknowledged 2871 * then enter FIN_WAIT_2. 2872 */ 2873 case TCPS_FIN_WAIT_1: 2874 if (ourfinisacked) { 2875 /* 2876 * If we can't receive any more 2877 * data, then closing user can proceed. 2878 * Starting the timer is contrary to the 2879 * specification, but if we don't get a FIN 2880 * we'll hang forever. 2881 * 2882 * XXXjl: 2883 * we should release the tp also, and use a 2884 * compressed state. 2885 */ 2886 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2887 soisdisconnected(so); 2888 tcp_timer_activate(tp, TT_2MSL, 2889 (tcp_fast_finwait2_recycle ? 2890 tcp_finwait2_timeout : 2891 TP_MAXIDLE(tp))); 2892 } 2893 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2894 } 2895 break; 2896 2897 /* 2898 * In CLOSING STATE in addition to the processing for 2899 * the ESTABLISHED state if the ACK acknowledges our FIN 2900 * then enter the TIME-WAIT state, otherwise ignore 2901 * the segment. 2902 */ 2903 case TCPS_CLOSING: 2904 if (ourfinisacked) { 2905 tcp_twstart(tp); 2906 m_freem(m); 2907 return; 2908 } 2909 break; 2910 2911 /* 2912 * In LAST_ACK, we may still be waiting for data to drain 2913 * and/or to be acked, as well as for the ack of our FIN. 2914 * If our FIN is now acknowledged, delete the TCB, 2915 * enter the closed state and return. 2916 */ 2917 case TCPS_LAST_ACK: 2918 if (ourfinisacked) { 2919 tp = tcp_close(tp); 2920 goto drop; 2921 } 2922 break; 2923 } 2924 } 2925 2926 step6: 2927 INP_WLOCK_ASSERT(tp->t_inpcb); 2928 2929 /* 2930 * Update window information. 2931 * Don't look at window if no ACK: TAC's send garbage on first SYN. 
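 * (Editorial note, not part of the original file: this is the
 * RFC 793 window-update rule.  The advertised window is accepted
 * when the segment is newer than the last update, i.e.
 * snd_wl1 < th_seq, or snd_wl1 == th_seq and snd_wl2 < th_ack, or
 * both match and the window simply grew; snd_wl1/snd_wl2 then
 * remember the seq/ack of this update so a stale, reordered segment
 * cannot shrink the send window.)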
2932 */ 2933 if ((thflags & TH_ACK) && 2934 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2935 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2936 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2937 /* keep track of pure window updates */ 2938 if (tlen == 0 && 2939 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2940 TCPSTAT_INC(tcps_rcvwinupd); 2941 tp->snd_wnd = tiwin; 2942 tp->snd_wl1 = th->th_seq; 2943 tp->snd_wl2 = th->th_ack; 2944 if (tp->snd_wnd > tp->max_sndwnd) 2945 tp->max_sndwnd = tp->snd_wnd; 2946 needoutput = 1; 2947 } 2948 2949 /* 2950 * Process segments with URG. 2951 */ 2952 if ((thflags & TH_URG) && th->th_urp && 2953 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2954 /* 2955 * This is a kludge, but if we receive and accept 2956 * random urgent pointers, we'll crash in 2957 * soreceive. It's hard to imagine someone 2958 * actually wanting to send this much urgent data. 2959 */ 2960 SOCKBUF_LOCK(&so->so_rcv); 2961 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 2962 th->th_urp = 0; /* XXX */ 2963 thflags &= ~TH_URG; /* XXX */ 2964 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2965 goto dodata; /* XXX */ 2966 } 2967 /* 2968 * If this segment advances the known urgent pointer, 2969 * then mark the data stream. This should not happen 2970 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2971 * a FIN has been received from the remote side. 2972 * In these states we ignore the URG. 2973 * 2974 * According to RFC961 (Assigned Protocols), 2975 * the urgent pointer points to the last octet 2976 * of urgent data. We continue, however, 2977 * to consider it to indicate the first octet 2978 * of data past the urgent section as the original 2979 * spec states (in one of two places). 2980 */ 2981 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2982 tp->rcv_up = th->th_seq + th->th_urp; 2983 so->so_oobmark = sbavail(&so->so_rcv) + 2984 (tp->rcv_up - tp->rcv_nxt) - 1; 2985 if (so->so_oobmark == 0) 2986 so->so_rcv.sb_state |= SBS_RCVATMARK; 2987 sohasoutofband(so); 2988 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2989 } 2990 SOCKBUF_UNLOCK(&so->so_rcv); 2991 /* 2992 * Remove out of band data so doesn't get presented to user. 2993 * This can happen independent of advancing the URG pointer, 2994 * but if two URG's are pending at once, some out-of-band 2995 * data may creep in... ick. 2996 */ 2997 if (th->th_urp <= (uint32_t)tlen && 2998 !(so->so_options & SO_OOBINLINE)) { 2999 /* hdr drop is delayed */ 3000 tcp_pulloutofband(so, th, m, drop_hdrlen); 3001 } 3002 } else { 3003 /* 3004 * If no out of band data is expected, 3005 * pull receive urgent pointer along 3006 * with the receive window. 3007 */ 3008 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 3009 tp->rcv_up = tp->rcv_nxt; 3010 } 3011 dodata: /* XXX */ 3012 INP_WLOCK_ASSERT(tp->t_inpcb); 3013 3014 /* 3015 * Process the segment text, merging it into the TCP sequencing queue, 3016 * and arranging for acknowledgment of receipt if necessary. 3017 * This process logically involves adjusting tp->rcv_wnd as data 3018 * is presented to the user (this happens in tcp_usrreq.c, 3019 * case PRU_RCVD). If a FIN has already been received on this 3020 * connection then we just ignore the text. 
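 * (Editorial note, not part of the original file: tfo_syn below
 * marks the case of a TCP Fast Open SYN still sitting in
 * SYN_RECEIVED; its payload must be queued or delivered even though
 * TCPS_HAVEESTABLISHED() is not yet true, which is why it appears
 * alongside that test in the in-order branch.)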
3021 */ 3022 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 3023 IS_FASTOPEN(tp->t_flags)); 3024 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 3025 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3026 tcp_seq save_start = th->th_seq; 3027 tcp_seq save_rnxt = tp->rcv_nxt; 3028 int save_tlen = tlen; 3029 m_adj(m, drop_hdrlen); /* delayed header drop */ 3030 /* 3031 * Insert segment which includes th into TCP reassembly queue 3032 * with control block tp. Set thflags to whether reassembly now 3033 * includes a segment with FIN. This handles the common case 3034 * inline (segment is the next to be received on an established 3035 * connection, and the queue is empty), avoiding linkage into 3036 * and removal from the queue and repetition of various 3037 * conversions. 3038 * Set DELACK for segments received in order, but ack 3039 * immediately when segments are out of order (so 3040 * fast retransmit can work). 3041 */ 3042 if (th->th_seq == tp->rcv_nxt && 3043 SEGQ_EMPTY(tp) && 3044 (TCPS_HAVEESTABLISHED(tp->t_state) || 3045 tfo_syn)) { 3046 if (DELAY_ACK(tp, tlen) || tfo_syn) 3047 tp->t_flags |= TF_DELACK; 3048 else 3049 tp->t_flags |= TF_ACKNOW; 3050 tp->rcv_nxt += tlen; 3051 if (tlen && 3052 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 3053 (tp->t_fbyte_in == 0)) { 3054 tp->t_fbyte_in = ticks; 3055 if (tp->t_fbyte_in == 0) 3056 tp->t_fbyte_in = 1; 3057 if (tp->t_fbyte_out && tp->t_fbyte_in) 3058 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 3059 } 3060 thflags = th->th_flags & TH_FIN; 3061 TCPSTAT_INC(tcps_rcvpack); 3062 TCPSTAT_ADD(tcps_rcvbyte, tlen); 3063 SOCKBUF_LOCK(&so->so_rcv); 3064 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 3065 m_freem(m); 3066 else 3067 sbappendstream_locked(&so->so_rcv, m, 0); 3068 SOCKBUF_UNLOCK(&so->so_rcv); 3069 tp->t_flags |= TF_WAKESOR; 3070 } else { 3071 /* 3072 * XXX: Due to the header drop above "th" is 3073 * theoretically invalid by now. Fortunately 3074 * m_adj() doesn't actually frees any mbufs 3075 * when trimming from the head. 3076 */ 3077 tcp_seq temp = save_start; 3078 thflags = tcp_reass(tp, th, &temp, &tlen, m); 3079 tp->t_flags |= TF_ACKNOW; 3080 } 3081 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) { 3082 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 3083 /* 3084 * DSACK actually handled in the fastpath 3085 * above. 3086 */ 3087 tcp_update_sack_list(tp, save_start, 3088 save_start + save_tlen); 3089 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 3090 if ((tp->rcv_numsacks >= 1) && 3091 (tp->sackblks[0].end == save_start)) { 3092 /* 3093 * Partial overlap, recorded at todrop 3094 * above. 3095 */ 3096 tcp_update_sack_list(tp, 3097 tp->sackblks[0].start, 3098 tp->sackblks[0].end); 3099 } else { 3100 tcp_update_dsack_list(tp, save_start, 3101 save_start + save_tlen); 3102 } 3103 } else if (tlen >= save_tlen) { 3104 /* Update of sackblks. */ 3105 tcp_update_dsack_list(tp, save_start, 3106 save_start + save_tlen); 3107 } else if (tlen > 0) { 3108 tcp_update_dsack_list(tp, save_start, 3109 save_start + tlen); 3110 } 3111 } 3112 #if 0 3113 /* 3114 * Note the amount of data that peer has sent into 3115 * our window, in order to estimate the sender's 3116 * buffer size. 3117 * XXX: Unused. 
3118 */ 3119 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3120 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3121 else 3122 len = so->so_rcv.sb_hiwat; 3123 #endif 3124 } else { 3125 m_freem(m); 3126 thflags &= ~TH_FIN; 3127 } 3128 3129 /* 3130 * If FIN is received ACK the FIN and let the user know 3131 * that the connection is closing. 3132 */ 3133 if (thflags & TH_FIN) { 3134 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3135 socantrcvmore(so); 3136 /* The socket upcall is handled by socantrcvmore. */ 3137 tp->t_flags &= ~TF_WAKESOR; 3138 /* 3139 * If connection is half-synchronized 3140 * (ie NEEDSYN flag on) then delay ACK, 3141 * so it may be piggybacked when SYN is sent. 3142 * Otherwise, since we received a FIN then no 3143 * more input can be expected, send ACK now. 3144 */ 3145 if (tp->t_flags & TF_NEEDSYN) 3146 tp->t_flags |= TF_DELACK; 3147 else 3148 tp->t_flags |= TF_ACKNOW; 3149 tp->rcv_nxt++; 3150 } 3151 switch (tp->t_state) { 3152 /* 3153 * In SYN_RECEIVED and ESTABLISHED STATES 3154 * enter the CLOSE_WAIT state. 3155 */ 3156 case TCPS_SYN_RECEIVED: 3157 tp->t_starttime = ticks; 3158 /* FALLTHROUGH */ 3159 case TCPS_ESTABLISHED: 3160 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3161 break; 3162 3163 /* 3164 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3165 * enter the CLOSING state. 3166 */ 3167 case TCPS_FIN_WAIT_1: 3168 tcp_state_change(tp, TCPS_CLOSING); 3169 break; 3170 3171 /* 3172 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3173 * starting the time-wait timer, turning off the other 3174 * standard timers. 3175 */ 3176 case TCPS_FIN_WAIT_2: 3177 tcp_twstart(tp); 3178 return; 3179 } 3180 } 3181 #ifdef TCPDEBUG 3182 if (so->so_options & SO_DEBUG) 3183 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3184 &tcp_savetcp, 0); 3185 #endif 3186 TCP_PROBE3(debug__input, tp, th, m); 3187 3188 /* 3189 * Return any desired output. 3190 */ 3191 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3192 (void) tp->t_fb->tfb_tcp_output(tp); 3193 3194 check_delack: 3195 INP_WLOCK_ASSERT(tp->t_inpcb); 3196 3197 if (tp->t_flags & TF_DELACK) { 3198 tp->t_flags &= ~TF_DELACK; 3199 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3200 } 3201 tcp_handle_wakeup(tp, so); 3202 INP_WUNLOCK(tp->t_inpcb); 3203 return; 3204 3205 dropafterack: 3206 /* 3207 * Generate an ACK dropping incoming segment if it occupies 3208 * sequence space, where the ACK reflects our state. 3209 * 3210 * We can now skip the test for the RST flag since all 3211 * paths to this code happen after packets containing 3212 * RST have been dropped. 3213 * 3214 * In the SYN-RECEIVED state, don't send an ACK unless the 3215 * segment we received passes the SYN-RECEIVED ACK test. 3216 * If it fails send a RST. This breaks the loop in the 3217 * "LAND" DoS attack, and also prevents an ACK storm 3218 * between two listening ports that have been sent forged 3219 * SYN segments, each with the source address of the other. 
3220 */ 3221 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3222 (SEQ_GT(tp->snd_una, th->th_ack) || 3223 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3224 rstreason = BANDLIM_RST_OPENPORT; 3225 goto dropwithreset; 3226 } 3227 #ifdef TCPDEBUG 3228 if (so->so_options & SO_DEBUG) 3229 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3230 &tcp_savetcp, 0); 3231 #endif 3232 TCP_PROBE3(debug__input, tp, th, m); 3233 tp->t_flags |= TF_ACKNOW; 3234 (void) tp->t_fb->tfb_tcp_output(tp); 3235 tcp_handle_wakeup(tp, so); 3236 INP_WUNLOCK(tp->t_inpcb); 3237 m_freem(m); 3238 return; 3239 3240 dropwithreset: 3241 if (tp != NULL) { 3242 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3243 tcp_handle_wakeup(tp, so); 3244 INP_WUNLOCK(tp->t_inpcb); 3245 } else 3246 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3247 return; 3248 3249 drop: 3250 /* 3251 * Drop space held by incoming segment and return. 3252 */ 3253 #ifdef TCPDEBUG 3254 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3255 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3256 &tcp_savetcp, 0); 3257 #endif 3258 TCP_PROBE3(debug__input, tp, th, m); 3259 if (tp != NULL) { 3260 tcp_handle_wakeup(tp, so); 3261 INP_WUNLOCK(tp->t_inpcb); 3262 } 3263 m_freem(m); 3264 } 3265 3266 /* 3267 * Issue RST and make ACK acceptable to originator of segment. 3268 * The mbuf must still include the original packet header. 3269 * tp may be NULL. 3270 */ 3271 void 3272 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3273 int tlen, int rstreason) 3274 { 3275 #ifdef INET 3276 struct ip *ip; 3277 #endif 3278 #ifdef INET6 3279 struct ip6_hdr *ip6; 3280 #endif 3281 3282 if (tp != NULL) { 3283 INP_WLOCK_ASSERT(tp->t_inpcb); 3284 } 3285 3286 /* Don't bother if destination was broadcast/multicast. */ 3287 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3288 goto drop; 3289 #ifdef INET6 3290 if (mtod(m, struct ip *)->ip_v == 6) { 3291 ip6 = mtod(m, struct ip6_hdr *); 3292 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3293 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3294 goto drop; 3295 /* IPv6 anycast check is done at tcp6_input() */ 3296 } 3297 #endif 3298 #if defined(INET) && defined(INET6) 3299 else 3300 #endif 3301 #ifdef INET 3302 { 3303 ip = mtod(m, struct ip *); 3304 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3305 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3306 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3307 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3308 goto drop; 3309 } 3310 #endif 3311 3312 /* Perform bandwidth limiting. */ 3313 if (badport_bandlim(rstreason) < 0) 3314 goto drop; 3315 3316 /* tcp_respond consumes the mbuf chain. */ 3317 if (th->th_flags & TH_ACK) { 3318 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3319 th->th_ack, TH_RST); 3320 } else { 3321 if (th->th_flags & TH_SYN) 3322 tlen++; 3323 if (th->th_flags & TH_FIN) 3324 tlen++; 3325 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3326 (tcp_seq)0, TH_RST|TH_ACK); 3327 } 3328 return; 3329 drop: 3330 m_freem(m); 3331 } 3332 3333 /* 3334 * Parse TCP options and place in tcpopt. 
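 * (Editorial note, not part of the original file: every option other
 * than EOL (kind 0) and NOP (kind 1) is encoded as
 * {kind, length, data...}, with length counting the whole option.
 * For illustration, an MSS of 1460 appears on the wire as
 * 0x02 0x04 0x05 0xb4, SACK-permitted as 0x04 0x02, and a timestamp
 * option as 0x08 0x0a followed by two 32-bit values.  The loop
 * below advances cp by optlen each pass and stops on any malformed
 * length.)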
3335 */ 3336 void 3337 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3338 { 3339 int opt, optlen; 3340 3341 to->to_flags = 0; 3342 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3343 opt = cp[0]; 3344 if (opt == TCPOPT_EOL) 3345 break; 3346 if (opt == TCPOPT_NOP) 3347 optlen = 1; 3348 else { 3349 if (cnt < 2) 3350 break; 3351 optlen = cp[1]; 3352 if (optlen < 2 || optlen > cnt) 3353 break; 3354 } 3355 switch (opt) { 3356 case TCPOPT_MAXSEG: 3357 if (optlen != TCPOLEN_MAXSEG) 3358 continue; 3359 if (!(flags & TO_SYN)) 3360 continue; 3361 to->to_flags |= TOF_MSS; 3362 bcopy((char *)cp + 2, 3363 (char *)&to->to_mss, sizeof(to->to_mss)); 3364 to->to_mss = ntohs(to->to_mss); 3365 break; 3366 case TCPOPT_WINDOW: 3367 if (optlen != TCPOLEN_WINDOW) 3368 continue; 3369 if (!(flags & TO_SYN)) 3370 continue; 3371 to->to_flags |= TOF_SCALE; 3372 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3373 break; 3374 case TCPOPT_TIMESTAMP: 3375 if (optlen != TCPOLEN_TIMESTAMP) 3376 continue; 3377 to->to_flags |= TOF_TS; 3378 bcopy((char *)cp + 2, 3379 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3380 to->to_tsval = ntohl(to->to_tsval); 3381 bcopy((char *)cp + 6, 3382 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3383 to->to_tsecr = ntohl(to->to_tsecr); 3384 break; 3385 case TCPOPT_SIGNATURE: 3386 /* 3387 * In order to reply to a host which has set the 3388 * TCP_SIGNATURE option in its initial SYN, we have 3389 * to record the fact that the option was observed 3390 * here for the syncache code to perform the correct 3391 * response. 3392 */ 3393 if (optlen != TCPOLEN_SIGNATURE) 3394 continue; 3395 to->to_flags |= TOF_SIGNATURE; 3396 to->to_signature = cp + 2; 3397 break; 3398 case TCPOPT_SACK_PERMITTED: 3399 if (optlen != TCPOLEN_SACK_PERMITTED) 3400 continue; 3401 if (!(flags & TO_SYN)) 3402 continue; 3403 if (!V_tcp_do_sack) 3404 continue; 3405 to->to_flags |= TOF_SACKPERM; 3406 break; 3407 case TCPOPT_SACK: 3408 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3409 continue; 3410 if (flags & TO_SYN) 3411 continue; 3412 to->to_flags |= TOF_SACK; 3413 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3414 to->to_sacks = cp + 2; 3415 TCPSTAT_INC(tcps_sack_rcv_blocks); 3416 break; 3417 case TCPOPT_FAST_OPEN: 3418 /* 3419 * Cookie length validation is performed by the 3420 * server side cookie checking code or the client 3421 * side cookie cache update code. 3422 */ 3423 if (!(flags & TO_SYN)) 3424 continue; 3425 if (!V_tcp_fastopen_client_enable && 3426 !V_tcp_fastopen_server_enable) 3427 continue; 3428 to->to_flags |= TOF_FASTOPEN; 3429 to->to_tfo_len = optlen - 2; 3430 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3431 break; 3432 default: 3433 continue; 3434 } 3435 } 3436 } 3437 3438 /* 3439 * Pull out of band byte out of a segment so 3440 * it doesn't appear in the user's data queue. 3441 * It is still reflected in the segment length for 3442 * sequencing purposes. 
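 * (Editorial note, not part of the original file: the byte at offset
 * off + th_urp - 1 is saved in tp->t_iobc and TCPOOB_HAVEDATA is
 * set; the remaining bytes of that mbuf are then shifted down with
 * bcopy() and m_len (and the packet header length) shrink by one,
 * so the in-band stream no longer contains the urgent byte.)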
3443 */ 3444 void 3445 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3446 int off) 3447 { 3448 int cnt = off + th->th_urp - 1; 3449 3450 while (cnt >= 0) { 3451 if (m->m_len > cnt) { 3452 char *cp = mtod(m, caddr_t) + cnt; 3453 struct tcpcb *tp = sototcpcb(so); 3454 3455 INP_WLOCK_ASSERT(tp->t_inpcb); 3456 3457 tp->t_iobc = *cp; 3458 tp->t_oobflags |= TCPOOB_HAVEDATA; 3459 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3460 m->m_len--; 3461 if (m->m_flags & M_PKTHDR) 3462 m->m_pkthdr.len--; 3463 return; 3464 } 3465 cnt -= m->m_len; 3466 m = m->m_next; 3467 if (m == NULL) 3468 break; 3469 } 3470 panic("tcp_pulloutofband"); 3471 } 3472 3473 /* 3474 * Collect new round-trip time estimate 3475 * and update averages and current timeout. 3476 */ 3477 void 3478 tcp_xmit_timer(struct tcpcb *tp, int rtt) 3479 { 3480 int delta; 3481 3482 INP_WLOCK_ASSERT(tp->t_inpcb); 3483 3484 TCPSTAT_INC(tcps_rttupdated); 3485 tp->t_rttupdated++; 3486 #ifdef STATS 3487 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, 3488 imax(0, rtt * 1000 / hz)); 3489 #endif 3490 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) { 3491 /* 3492 * srtt is stored as fixed point with 5 bits after the 3493 * binary point (i.e., scaled by 8). The following magic 3494 * is equivalent to the smoothing algorithm in rfc793 with 3495 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3496 * point). Adjust rtt to origin 0. 3497 */ 3498 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3499 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3500 3501 if ((tp->t_srtt += delta) <= 0) 3502 tp->t_srtt = 1; 3503 3504 /* 3505 * We accumulate a smoothed rtt variance (actually, a 3506 * smoothed mean difference), then set the retransmit 3507 * timer to smoothed rtt + 4 times the smoothed variance. 3508 * rttvar is stored as fixed point with 4 bits after the 3509 * binary point (scaled by 16). The following is 3510 * equivalent to rfc793 smoothing with an alpha of .75 3511 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3512 * rfc793's wired-in beta. 3513 */ 3514 if (delta < 0) 3515 delta = -delta; 3516 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3517 if ((tp->t_rttvar += delta) <= 0) 3518 tp->t_rttvar = 1; 3519 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3520 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3521 } else { 3522 /* 3523 * No rtt measurement yet - use the unsmoothed rtt. 3524 * Set the variance to half the rtt (so our first 3525 * retransmit happens at 3*rtt). 3526 */ 3527 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3528 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3529 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3530 } 3531 tp->t_rtttime = 0; 3532 tp->t_rxtshift = 0; 3533 3534 /* 3535 * the retransmit should happen at rtt + 4 * rttvar. 3536 * Because of the way we do the smoothing, srtt and rttvar 3537 * will each average +1/2 tick of bias. When we compute 3538 * the retransmit timer, we want 1/2 tick of rounding and 3539 * 1 extra tick because of +-1/2 tick uncertainty in the 3540 * firing of the timer. The bias will give us exactly the 3541 * 1.5 tick we need. But, because the bias is 3542 * statistical, we have to test that we don't drop below 3543 * the minimum feasible timer (which is 2 ticks). 3544 */ 3545 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3546 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3547 3548 /* 3549 * We received an ack for a packet that wasn't retransmitted; 3550 * it is probably safe to discard any error indications we've 3551 * received recently. 
This isn't quite right, but close enough 3552 * for now (a route might have failed after we sent a segment, 3553 * and the return path might not be symmetrical). 3554 */ 3555 tp->t_softerror = 0; 3556 } 3557 3558 /* 3559 * Determine a reasonable value for maxseg size. 3560 * If the route is known, check route for mtu. 3561 * If none, use an mss that can be handled on the outgoing interface 3562 * without forcing IP to fragment. If no route is found, route has no mtu, 3563 * or the destination isn't local, use a default, hopefully conservative 3564 * size (usually 512 or the default IP max size, but no more than the mtu 3565 * of the interface), as we can't discover anything about intervening 3566 * gateways or networks. We also initialize the congestion/slow start 3567 * window to be a single segment if the destination isn't local. 3568 * While looking at the routing entry, we also initialize other path-dependent 3569 * parameters from pre-set or cached values in the routing entry. 3570 * 3571 * NOTE that resulting t_maxseg doesn't include space for TCP options or 3572 * IP options, e.g. IPSEC data, since length of this data may vary, and 3573 * thus it is calculated for every segment separately in tcp_output(). 3574 * 3575 * NOTE that this routine is only called when we process an incoming 3576 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3577 * settings are handled in tcp_mssopt(). 3578 */ 3579 void 3580 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3581 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3582 { 3583 int mss = 0; 3584 uint32_t maxmtu = 0; 3585 struct inpcb *inp = tp->t_inpcb; 3586 struct hc_metrics_lite metrics; 3587 #ifdef INET6 3588 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3589 size_t min_protoh = isipv6 ? 3590 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3591 sizeof (struct tcpiphdr); 3592 #else 3593 const size_t min_protoh = sizeof(struct tcpiphdr); 3594 #endif 3595 3596 INP_WLOCK_ASSERT(tp->t_inpcb); 3597 3598 if (mtuoffer != -1) { 3599 KASSERT(offer == -1, ("%s: conflict", __func__)); 3600 offer = mtuoffer - min_protoh; 3601 } 3602 3603 /* Initialize. */ 3604 #ifdef INET6 3605 if (isipv6) { 3606 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3607 tp->t_maxseg = V_tcp_v6mssdflt; 3608 } 3609 #endif 3610 #if defined(INET) && defined(INET6) 3611 else 3612 #endif 3613 #ifdef INET 3614 { 3615 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3616 tp->t_maxseg = V_tcp_mssdflt; 3617 } 3618 #endif 3619 3620 /* 3621 * No route to sender, stay with default mss and return. 3622 */ 3623 if (maxmtu == 0) { 3624 /* 3625 * In case we return early we need to initialize metrics 3626 * to a defined state as tcp_hc_get() would do for us 3627 * if there was no cache hit. 3628 */ 3629 if (metricptr != NULL) 3630 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3631 return; 3632 } 3633 3634 /* What have we got? */ 3635 switch (offer) { 3636 case 0: 3637 /* 3638 * Offer == 0 means that there was no MSS on the SYN 3639 * segment, in this case we use tcp_mssdflt as 3640 * already assigned to t_maxseg above. 3641 */ 3642 offer = tp->t_maxseg; 3643 break; 3644 3645 case -1: 3646 /* 3647 * Offer == -1 means that we didn't receive SYN yet. 3648 */ 3649 /* FALLTHROUGH */ 3650 3651 default: 3652 /* 3653 * Prevent DoS attack with too small MSS. Round up 3654 * to at least minmss. 3655 */ 3656 offer = max(offer, V_tcp_minmss); 3657 } 3658 3659 /* 3660 * rmx information is now retrieved from tcp_hostcache. 
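 * (Editorial note, not part of the original file: a worked example
 * of the selection below for IPv4 -- with no cached rmx_mtu and an
 * interface MTU of 1500, mss = 1500 - sizeof(struct tcpiphdr) =
 * 1460; if the peer's SYN offered 1400 the final value is
 * min(1460, 1400) == 1400, and it is never allowed to drop below
 * 64.)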
3661 */ 3662 tcp_hc_get(&inp->inp_inc, &metrics); 3663 if (metricptr != NULL) 3664 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3665 3666 /* 3667 * If there's a discovered mtu in tcp hostcache, use it. 3668 * Else, use the link mtu. 3669 */ 3670 if (metrics.rmx_mtu) 3671 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3672 else { 3673 #ifdef INET6 3674 if (isipv6) { 3675 mss = maxmtu - min_protoh; 3676 if (!V_path_mtu_discovery && 3677 !in6_localaddr(&inp->in6p_faddr)) 3678 mss = min(mss, V_tcp_v6mssdflt); 3679 } 3680 #endif 3681 #if defined(INET) && defined(INET6) 3682 else 3683 #endif 3684 #ifdef INET 3685 { 3686 mss = maxmtu - min_protoh; 3687 if (!V_path_mtu_discovery && 3688 !in_localaddr(inp->inp_faddr)) 3689 mss = min(mss, V_tcp_mssdflt); 3690 } 3691 #endif 3692 /* 3693 * XXX - The above conditional (mss = maxmtu - min_protoh) 3694 * probably violates the TCP spec. 3695 * The problem is that, since we don't know the 3696 * other end's MSS, we are supposed to use a conservative 3697 * default. But, if we do that, then MTU discovery will 3698 * never actually take place, because the conservative 3699 * default is much less than the MTUs typically seen 3700 * on the Internet today. For the moment, we'll sweep 3701 * this under the carpet. 3702 * 3703 * The conservative default might not actually be a problem 3704 * if the only case this occurs is when sending an initial 3705 * SYN with options and data to a host we've never talked 3706 * to before. Then, they will reply with an MSS value which 3707 * will get recorded and the new parameters should get 3708 * recomputed. For Further Study. 3709 */ 3710 } 3711 mss = min(mss, offer); 3712 3713 /* 3714 * Sanity check: make sure that maxseg will be large 3715 * enough to allow some data on segments even if the 3716 * all the option space is used (40bytes). Otherwise 3717 * funny things may happen in tcp_output. 3718 * 3719 * XXXGL: shouldn't we reserve space for IP/IPv6 options? 3720 */ 3721 mss = max(mss, 64); 3722 3723 tp->t_maxseg = mss; 3724 } 3725 3726 void 3727 tcp_mss(struct tcpcb *tp, int offer) 3728 { 3729 int mss; 3730 uint32_t bufsize; 3731 struct inpcb *inp; 3732 struct socket *so; 3733 struct hc_metrics_lite metrics; 3734 struct tcp_ifcap cap; 3735 3736 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3737 3738 bzero(&cap, sizeof(cap)); 3739 tcp_mss_update(tp, offer, -1, &metrics, &cap); 3740 3741 mss = tp->t_maxseg; 3742 inp = tp->t_inpcb; 3743 3744 /* 3745 * If there's a pipesize, change the socket buffer to that size, 3746 * don't change if sb_hiwat is different than default (then it 3747 * has been changed on purpose with setsockopt). 3748 * Make the socket buffers an integral number of mss units; 3749 * if the mss is larger than the socket buffer, decrease the mss. 3750 */ 3751 so = inp->inp_socket; 3752 SOCKBUF_LOCK(&so->so_snd); 3753 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe) 3754 bufsize = metrics.rmx_sendpipe; 3755 else 3756 bufsize = so->so_snd.sb_hiwat; 3757 if (bufsize < mss) 3758 mss = bufsize; 3759 else { 3760 bufsize = roundup(bufsize, mss); 3761 if (bufsize > sb_max) 3762 bufsize = sb_max; 3763 if (bufsize > so->so_snd.sb_hiwat) 3764 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3765 } 3766 SOCKBUF_UNLOCK(&so->so_snd); 3767 /* 3768 * Sanity check: make sure that maxseg will be large 3769 * enough to allow some data on segments even if the 3770 * all the option space is used (40bytes). Otherwise 3771 * funny things may happen in tcp_output. 
3772 * 3773 * XXXGL: shouldn't we reserve space for IP/IPv6 options? 3774 */ 3775 tp->t_maxseg = max(mss, 64); 3776 3777 SOCKBUF_LOCK(&so->so_rcv); 3778 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe) 3779 bufsize = metrics.rmx_recvpipe; 3780 else 3781 bufsize = so->so_rcv.sb_hiwat; 3782 if (bufsize > mss) { 3783 bufsize = roundup(bufsize, mss); 3784 if (bufsize > sb_max) 3785 bufsize = sb_max; 3786 if (bufsize > so->so_rcv.sb_hiwat) 3787 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3788 } 3789 SOCKBUF_UNLOCK(&so->so_rcv); 3790 3791 /* Check the interface for TSO capabilities. */ 3792 if (cap.ifcap & CSUM_TSO) { 3793 tp->t_flags |= TF_TSO; 3794 tp->t_tsomax = cap.tsomax; 3795 tp->t_tsomaxsegcount = cap.tsomaxsegcount; 3796 tp->t_tsomaxsegsize = cap.tsomaxsegsize; 3797 } 3798 } 3799 3800 /* 3801 * Determine the MSS option to send on an outgoing SYN. 3802 */ 3803 int 3804 tcp_mssopt(struct in_conninfo *inc) 3805 { 3806 int mss = 0; 3807 uint32_t thcmtu = 0; 3808 uint32_t maxmtu = 0; 3809 size_t min_protoh; 3810 3811 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3812 3813 #ifdef INET6 3814 if (inc->inc_flags & INC_ISIPV6) { 3815 mss = V_tcp_v6mssdflt; 3816 maxmtu = tcp_maxmtu6(inc, NULL); 3817 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3818 } 3819 #endif 3820 #if defined(INET) && defined(INET6) 3821 else 3822 #endif 3823 #ifdef INET 3824 { 3825 mss = V_tcp_mssdflt; 3826 maxmtu = tcp_maxmtu(inc, NULL); 3827 min_protoh = sizeof(struct tcpiphdr); 3828 } 3829 #endif 3830 #if defined(INET6) || defined(INET) 3831 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3832 #endif 3833 3834 if (maxmtu && thcmtu) 3835 mss = min(maxmtu, thcmtu) - min_protoh; 3836 else if (maxmtu || thcmtu) 3837 mss = max(maxmtu, thcmtu) - min_protoh; 3838 3839 return (mss); 3840 } 3841 3842 /* 3843 * On a partial ack arrives, force the retransmission of the 3844 * next unacknowledged segment. Do not clear tp->t_dupacks. 3845 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3846 * be started again. 3847 */ 3848 void 3849 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3850 { 3851 tcp_seq onxt = tp->snd_nxt; 3852 uint32_t ocwnd = tp->snd_cwnd; 3853 u_int maxseg = tcp_maxseg(tp); 3854 3855 INP_WLOCK_ASSERT(tp->t_inpcb); 3856 3857 tcp_timer_activate(tp, TT_REXMT, 0); 3858 tp->t_rtttime = 0; 3859 tp->snd_nxt = th->th_ack; 3860 /* 3861 * Set snd_cwnd to one segment beyond acknowledged offset. 3862 * (tp->snd_una has not yet been updated when this function is called.) 3863 */ 3864 tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th); 3865 tp->t_flags |= TF_ACKNOW; 3866 (void) tp->t_fb->tfb_tcp_output(tp); 3867 tp->snd_cwnd = ocwnd; 3868 if (SEQ_GT(onxt, tp->snd_nxt)) 3869 tp->snd_nxt = onxt; 3870 /* 3871 * Partial window deflation. Relies on fact that tp->snd_una 3872 * not updated yet. 3873 */ 3874 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th)) 3875 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th); 3876 else 3877 tp->snd_cwnd = 0; 3878 tp->snd_cwnd += maxseg; 3879 } 3880 3881 int 3882 tcp_compute_pipe(struct tcpcb *tp) 3883 { 3884 return (tp->snd_max - tp->snd_una + 3885 tp->sackhint.sack_bytes_rexmit - 3886 tp->sackhint.sacked_bytes); 3887 } 3888 3889 uint32_t 3890 tcp_compute_initwnd(uint32_t maxseg) 3891 { 3892 /* 3893 * Calculate the Initial Window, also used as Restart Window 3894 * 3895 * RFC5681 Section 3.1 specifies the default conservative values. 3896 * RFC3390 specifies slightly more aggressive values. 
int
tcp_compute_pipe(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una +
		tp->sackhint.sack_bytes_rexmit -
		tp->sackhint.sacked_bytes);
}

uint32_t
tcp_compute_initwnd(uint32_t maxseg)
{
	/*
	 * Calculate the Initial Window, also used as the Restart Window.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size is also
	 * supported; see the worked example following this function.
	 */
	if (V_tcp_initcwnd_segments)
		return min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		return min(4 * maxseg, max(2 * maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			return (2 * maxseg);
		else if (maxseg > 1095)
			return (3 * maxseg);
		else
			return (4 * maxseg);
	}
}
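
/*
 * Illustrative values (assuming a typical Ethernet-derived maxseg of
 * 1460 bytes): with V_tcp_initcwnd_segments set to 10,
 * tcp_compute_initwnd() returns min(14600, max(2920, 14600)) = 14600
 * bytes, i.e. ten segments per RFC6928; with only V_tcp_do_rfc3390
 * enabled it returns min(5840, max(2920, 4380)) = 4380 bytes; and under
 * the plain RFC5681 rules it returns 3 * 1460 = 4380 bytes.
 */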