/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/stats.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <netinet/tcp_ecn.h>
#include <netinet/udp.h>

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

VNET_DEFINE(int, tcp_log_in_vain) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_log_in_vain), 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(bool, blackhole_local) = false;
#define	V_blackhole_local	VNET(blackhole_local)
SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, blackhole_local, CTLFLAG_VNET |
    CTLFLAG_RW, &VNET_NAME(blackhole_local), false,
    "Enforce net.inet.tcp.blackhole for locally originated packets");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_prr_conservative) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr_conservative, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_prr_conservative), 0,
    "Do conservative Proportional Rate Reduction");

VNET_DEFINE(int, tcp_do_prr) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_prr, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_prr), 1,
    "Enable Proportional Rate Reduction per RFC 6937");

VNET_DEFINE(int, tcp_do_lrd) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_lrd, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_lrd), 1,
    "Perform Lost Retransmission Detection");

VNET_DEFINE(int, tcp_do_newcwv) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newcwv, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_newcwv), 0,
    "Enable New Congestion Window Validation per RFC7661");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbinfo, tcbinfo);

/*
 * TCP statistics are stored in an array of counter(9)s, which size matches
 * size of struct tcpstat.  TCP running connection count is a regular array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

/*
 * Kernel module interface for updating tcpstat.  The first argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_add(int statnum, int val)
{

	counter_u64_add(VNET(tcpstat)[statnum], val);
}

/*
 * Make sure that we only start a SACK loss recovery when
 * receiving a duplicate ACK with a SACK block, and also
 * complete SACK loss recovery in case the other end
 * reneges.
 */
static bool inline
tcp_is_sack_recovery(struct tcpcb *tp, struct tcpopt *to)
{
	return ((tp->t_flags & TF_SACK_PERMIT) &&
	    ((to->to_flags & TOF_SACK) ||
	    (!TAILQ_EMPTY(&tp->snd_holes))));
}

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    &tp->t_osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
    uint16_t type)
{
#ifdef STATS
	int32_t gput;
#endif

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	tp->t_ccv.nsegs = nsegs;
	tp->t_ccv.bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) ||
	    (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) &&
	    (tp->snd_cwnd < (tcp_compute_pipe(tp) * 2))))
		tp->t_ccv.flags |= CCF_CWND_LIMITED;
	else
		tp->t_ccv.flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
#ifdef STATS
		stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF,
		    ((int32_t)tp->snd_cwnd) - tp->snd_wnd);
		if (!IN_RECOVERY(tp->t_flags))
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_ACKLEN,
			    tp->t_ccv.bytes_this_ack / (tcp_maxseg(tp) * nsegs));
		if ((tp->t_flags & TF_GPUTINPROG) &&
		    SEQ_GEQ(th->th_ack, tp->gput_ack)) {
			/*
			 * Compute goodput in bits per millisecond.
			 */
			gput = (((int64_t)SEQ_SUB(th->th_ack, tp->gput_seq)) << 3) /
			    max(1, tcp_ts_getticks() - tp->gput_ts);
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT,
			    gput);
			/*
			 * XXXLAS: This is a temporary hack, and should be
			 * chained off VOI_TCP_GPUT when stats(9) grows an API
			 * to deal with chained VOIs.
			 */
			if (tp->t_stats_gput_prev > 0)
				stats_voi_update_abs_s32(tp->t_stats,
				    VOI_TCP_GPUT_ND,
				    ((gput - tp->t_stats_gput_prev) * 100) /
				    tp->t_stats_gput_prev);
			tp->t_flags &= ~TF_GPUTINPROG;
			tp->t_stats_gput_prev = gput;
		}
#endif /* STATS */
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += tp->t_ccv.bytes_this_ack;
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->t_ccv.flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->t_ccv.flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->t_ccv.curack = th->th_ack;
		CC_ALGO(tp)->ack_received(&tp->t_ccv, type);
	}
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd);
#endif
}

void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tptoinpcb(tp);
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(inp);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else
		tp->snd_cwnd = tcp_compute_initwnd(maxseg);

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(&tp->t_ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	INP_WLOCK_ASSERT(tptoinpcb(tp));

#ifdef STATS
	stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
#endif

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags) ||
		    /*
		     * Allow ECN reaction on ACK to CWR, if
		     * that data segment was also CE marked.
		     */
		    SEQ_GEQ(th->th_ack, tp->snd_recover)) {
			EXIT_CONGRECOVERY(tp->t_flags);
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max + 1;
			if (tp->t_flags2 & TF2_ECN_PERMIT)
				tp->t_flags2 |= TF2_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		if (tp->t_flags2 & TF2_ECN_PERMIT)
			tp->t_flags2 |= TF2_ECN_SND_CWR;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->t_ccv.curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(&tp->t_ccv, type);
	}
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tptoinpcb(tp));

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->t_ccv.curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(&tp->t_ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
	tp->sackhint.delivered_data = 0;
	tp->sackhint.prr_out = 0;
}

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxseg) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

void inline
cc_ecnpkt_handler_flags(struct tcpcb *tp, uint16_t flags, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_ccv.flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			/* FALLTHROUGH */
		case IPTOS_ECN_ECT1:
			/* FALLTHROUGH */
		case IPTOS_ECN_NOTECT:
			tp->t_ccv.flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (flags & TH_CWR)
			tp->t_ccv.flags |= CCF_TCPHDR_CWR;
		else
			tp->t_ccv.flags &= ~CCF_TCPHDR_CWR;

		CC_ALGO(tp)->ecnpkt_handler(&tp->t_ccv);

		if (tp->t_ccv.flags & CCF_ACKNOW) {
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
			tp->t_flags |= TF_ACKNOW;
		}
	}
}

void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	cc_ecnpkt_handler_flags(tp, tcp_get_flags(th), iptos);
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
{
	struct mbuf *m;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	m = *mp;
	if (m->m_len < *offp + sizeof(struct tcphdr)) {
		m = m_pullup(m, *offp + sizeof(struct tcphdr));
		if (m == NULL) {
			*mp = m;
			TCPSTAT_INC(tcps_rcvshort);
			return (IPPROTO_DONE);
		}
	}

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	*mp = m;
	return (tcp_input_with_port(mp, offp, proto, port));
}

int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{

	return(tcp6_input_with_port(mp, offp, proto, 0));
}
#endif /* INET6 */

int
tcp_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
	uint8_t ipttl;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	int lookupflag;
	uint8_t iptos;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */

	NET_EPOCH_ASSERT();

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (port)
			goto skip6_csum;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
	skip6_csum:
		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		KASSERT(!IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst),
		    ("%s: unspecified destination v6 address", __func__));
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			IP6STAT_INC(ip6s_badscope); /* XXX */
			goto drop;
		}
		iptos = IPV6_TRAFFIC_CLASS(ip6);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		iptos = ip->ip_tos;
		if (port)
			goto skip_csum;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			ipttl = ip->ip_ttl;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(len);
			/* Reset TOS bits */
			ip->ip_tos = iptos;
			/* Re-initialization for later version check */
			ip->ip_ttl = ipttl;
			ip->ip_v = IPVERSION;
			ip->ip_hl = off0 >> 2;
		}
	skip_csum:
		if (th->th_sum && (port == 0)) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		KASSERT(ip->ip_dst.s_addr != INADDR_ANY,
		    ("%s: unspecified destination v4 address", __func__));
		if (__predict_false(ip->ip_src.s_addr == INADDR_ANY)) {
			IPSTAT_INC(ips_badaddr);
			goto drop;
		}
	}
#endif /* INET */

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			if (m->m_len < off0 + off) {
				m = m_pullup(m, off0 + off);
				if (m == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
			}
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = tcp_get_flags(th);

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	/*
	 * For initial SYN packets we don't need write lock on matching
	 * PCB, be it a listening one or a synchronized one.  The packet
	 * shall not modify its state.
	 */
	lookupflag = INPLOOKUP_WILDCARD |
	    ((thflags & (TH_ACK|TH_SYN)) == TH_SYN ?
	    INPLOOKUP_RLOCKPCB : INPLOOKUP_WLOCKPCB);
findpcb:
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    lookupflag & ~INPLOOKUP_WILDCARD, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, lookupflag, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport, lookupflag,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, lookupflag & ~INPLOOKUP_WILDCARD,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, lookupflag, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport, lookupflag,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		if (rstreason != 0) {
			/* We came here after second (safety) lookup. */
			MPASS((lookupflag & INPLOOKUP_WILDCARD) == 0);
			goto dropwithreset;
		}
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    V_tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if (((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2) && (V_blackhole_local || (
#ifdef INET6
		    isipv6 ? !in6_localaddr(&ip6->ip6_src) :
#endif
#ifdef INET
		    !in_localip(ip->ip_src)
#else
		    true
#endif
		    )))
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_LOCK_ASSERT(inp);

	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    !SOLISTENING(inp->inp_socket)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6) &&
	    IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
		goto dropunlock;
	}
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4) &&
	    IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
		goto dropunlock;
	}
#endif /* INET */
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6) {
			if (inp->inp_ip_minttl > ip6->ip6_hlim)
				goto dropunlock;
		} else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	tp = intotcpcb(inp);
	switch (tp->t_state) {
	case TCPS_TIME_WAIT:
		/*
		 * A previous connection in TIMEWAIT state is supposed to catch
		 * stray or duplicate segments arriving late.  If this segment
		 * was a legitimate new connection attempt, the old INPCB gets
		 * removed and we can try again to find a listening socket.
		 */
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		/*
		 * tcp_twcheck unlocks the inp always, and frees the m if fails.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		return (IPPROTO_DONE);
	case TCPS_CLOSED:
		/*
		 * The TCPCB may no longer exist if the connection is winding
		 * down or it is in the CLOSED state.  Either way we drop the
		 * segment and send an appropriate response.
		 */
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

	if ((tp->t_port != port) && (tp->t_state > TCPS_LISTEN)) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

#ifdef MAC
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	KASSERT(tp->t_state == TCPS_LISTEN || !SOLISTENING(so),
	    ("%s: so accepting but tp %p not listening", __func__, tp));
	if (tp->t_state == TCPS_LISTEN && SOLISTENING(so)) {
		struct in_conninfo inc;

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
				inc.inc_flags |= INC_IPV6MINMTU;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock inp.
			 */
			rstreason = syncache_expand(&inc, &to, th, &so, m, port);
			if (rstreason < 0) {
				/*
				 * A failing TCP MD5 signature comparison
				 * must result in the segment being dropped
				 * and must not produce any response back
				 * to the sender.
				 */
				goto dropunlock;
			} else if (rstreason == 0) {
				/*
				 * No syncache entry, or ACK was not for our
				 * SYN/ACK.  Do our protection against double
				 * ACK.  If peer sent us 2 ACKs, then for the
				 * first one syncache_expand() successfully
				 * converted syncache entry into a socket,
				 * while we were waiting on the inpcb lock.  We
				 * don't want to send RST for the second ACK,
				 * so we perform second lookup without wildcard
				 * match, hoping to find the new socket.  If
				 * the ACK is stray indeed, rstreason would
				 * hint the above code that the lookup was a
				 * second attempt.
				 *
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				INP_WUNLOCK(inp);
				rstreason = BANDLIM_RST_OPENPORT;
				lookupflag &= ~INPLOOKUP_WILDCARD;
				goto findpcb;
			}
tfo_socket_result:
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 * If we came here via jump to tfo_socket_result,
			 * then listening socket is read-locked.
			 */
			INP_UNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			TCP_PROBE5(receive, NULL, tp, m, tp, th);
			tp->t_fb->tfb_tcp_do_segment(tp, m, th, drop_hdrlen,
			    tlen, iptos);
			return (IPPROTO_DONE);
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th, m, port);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc, port);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
		INP_RLOCK_ASSERT(inp);
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 *   1. use of deprecated addr with existing
		 *	communication is okay - "SHOULD continue to be
		 *	used"
		 *   2. use of it with new communication:
		 *	(2a) "SHOULD NOT be used if alternate address
		 *	     with sufficient scope is available"
		 *	(2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
		TCP_PROBE3(debug__input, tp, th, m);
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		if ((so = syncache_add(&inc, &to, th, inp, so, m, NULL, NULL,
		    iptos, port)) != NULL)
			goto tfo_socket_result;

		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition allowing a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (tp->t_flags & TF_SIGNATURE) {
		tcp_dooptions(&to, optp, optlen, thflags);
		if ((to.to_flags & TOF_SIGNATURE) == 0) {
			TCPSTAT_INC(tcps_sig_err_nosigopt);
			goto dropunlock;
		}
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_INPUT(m, th, to.to_signature) != 0)
			goto dropunlock;
	}
#endif
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 *
	 * XXXGL: in case of a pure SYN arriving on existing connection
	 * TCP stacks won't need to modify the PCB, they would either drop
	 * the segment silently, or send a challenge ACK.  However, we try
	 * to upgrade the lock, because calling convention for stacks is
	 * write-lock on PCB.  If upgrade fails, drop the SYN.
	 */
	if ((lookupflag & INPLOOKUP_RLOCKPCB) && INP_TRY_UPGRADE(inp) == 0)
		goto dropunlock;

	tp->t_fb->tfb_tcp_do_segment(tp, m, th, drop_hdrlen, tlen, iptos);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_UNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (inp != NULL)
		INP_UNLOCK(inp);

drop:
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * Automatic sizing of receive socket buffer.  Often the send
 * buffer size is not optimally adjusted to the actual network
 * conditions at hand (delay bandwidth product).  Setting the
 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (eg. trans-continental/oceanic links).
 *
 * On the receive side the socket buffer memory is only rarely
 * used to any significant extent.  This allows us to be much
 * more aggressive in scaling the receive socket buffer.  For
 * the case that the buffer space is actually used to a large
 * extent and we run out of kernel memory we can simply drop
 * the new segments; TCP on the sender will just retransmit it
 * later.  Setting the buffer size too big may only consume too
 * much kernel memory if the application doesn't read() from
 * the socket or packet loss or reordering makes use of the
 * reassembly queue.
 *
 * The criteria to step up the receive buffer one notch are:
 *  1. Application has not set receive buffer size with
 *     SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
 *  2. the number of bytes received during 1/2 of an sRTT
 *     is at least 3/8 of the current socket buffer size.
 *  3. receive buffer size has not hit maximal automatic size;
 *
 * If all of the criteria are met we increase the socket buffer
 * by 1/2 (bounded by the max).  This allows us to keep ahead
 * of slow-start but also makes it so our peer never gets limited
 * by our rwnd which we then open up causing a burst.
 *
 * This algorithm does two steps per RTT at most and only if
 * we receive a bulk stream w/o packet losses or reorderings.
 * Shrinking the buffer during idle times is not necessary as
 * it doesn't consume any memory when idle.
 *
 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
	int newsize = 0;

	if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
	    tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
	    TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
	    ((tp->t_srtt >> TCP_RTT_SHIFT)/2)) {
		if (tp->rfbuf_cnt > ((so->so_rcv.sb_hiwat / 2)/ 4 * 3) &&
		    so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
			newsize = min((so->so_rcv.sb_hiwat + (so->so_rcv.sb_hiwat/2)), V_tcp_autorcvbuf_max);
		}
		TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

		/* Start over with next RTT. */
		tp->rfbuf_ts = 0;
		tp->rfbuf_cnt = 0;
	} else {
		tp->rfbuf_cnt += tlen;	/* add up */
	}
	return (newsize);
}

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	return(tcp_input_with_port(mp, offp, proto, 0));
}

static void
tcp_handle_wakeup(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if (tp->t_flags & TF_WAKESOR) {
		struct socket *so = tptosocket(tp);

		tp->t_flags &= ~TF_WAKESOR;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		sorwakeup_locked(so);
	}
}

void
tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int drop_hdrlen, int tlen, uint8_t iptos)
{
	uint16_t thflags;
	int acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win, incforsyn = 0;
	uint32_t tiwin;
	uint16_t nsegs;
	char *s;
	struct inpcb *inp = tptoinpcb(tp);
	struct socket *so = tptosocket(tp);
	struct in_conninfo *inc = &inp->inp_inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;
	u_int maxseg;

	thflags = tcp_get_flags(th);
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;
	nsegs = max(1, m->m_pkthdr.lro_nsegs);

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
	    tlen, NULL, true);

	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: "
			    "SYN|FIN segment ignored (based on "
			    "sysctl setting)\n", s, __func__);
			free(s, M_TCPLOG);
		}
		goto drop;
	}

	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_UNLIMITED;
		tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
		goto dropwithreset;
	}

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	if (tp->t_idle_reduce &&
	    (tp->snd_max == tp->snd_una) &&
	    ((ticks - tp->t_rcvtime) >= tp->t_rxtcur))
		cc_after_idle(tp);
	tp->t_rcvtime = ticks;

	if (thflags & TH_FIN)
		tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN);
	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
#ifdef STATS
	stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin);
#endif

	/*
	 * TCP ECN processing.
	 */
	if (tcp_ecn_input_segment(tp, thflags, tlen,
	    tcp_packets_this_ack(tp, th->th_ack),
	    iptos))
		cc_cong_signal(tp, th, CC_ECN);

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if ((tp->t_flags & TF_SIGNATURE) != 0 &&
	    (to.to_flags & TOF_SIGNATURE) == 0) {
		TCPSTAT_INC(tcps_sig_err_sigopt);
		/* XXX: should drop? */
	}
#endif
	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
		else if (tp->t_rxtshift == 1 &&
		    tp->t_flags & TF_PREVVALID &&
		    tp->t_badrxtwin != 0 &&
		    TSTMP_LT(to.to_tsecr, tp->t_badrxtwin))
			cc_cong_signal(tp, th, CC_RTO_ERR);
	}
	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		/* Handle parallel SYN for ECN */
		tcp_ecn_input_parallel_syn(tp, thflags, iptos);
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE) &&
		    !(tp->t_flags & TF_NOOPT)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		} else
			tp->t_flags &= ~TF_REQ_SCALE;
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if ((to.to_flags & TOF_TS) &&
		    (tp->t_flags & TF_REQ_TSTMP) &&
		    !(tp->t_flags & TF_NOOPT)) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		} else
			tp->t_flags &= ~TF_REQ_TSTMP;
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (!(to.to_flags & TOF_SACKPERM) ||
		    (tp->t_flags & TF_NOOPT)))
			tp->t_flags &= ~TF_SACK_PERMIT;
		if (IS_FASTOPEN(tp->t_flags)) {
			if ((to.to_flags & TOF_FASTOPEN) &&
			    !(tp->t_flags & TF_NOOPT)) {
				uint16_t mss;

				if (to.to_flags & TOF_MSS)
					mss = to.to_mss;
				else
					if ((inp->inp_vflag & INP_IPV6) != 0)
						mss = TCP6_MSS;
					else
						mss = TCP_MSS;
				tcp_fastopen_update_cache(tp, mss,
				    to.to_tfo_len, to.to_tfo_cookie);
			} else
				tcp_fastopen_disable_path(tp);
		}
	}

	/*
	 * If timestamps were negotiated during SYN/ACK and a
	 * segment without a timestamp is received, silently drop
	 * the segment, unless it is a RST segment or missing timestamps are
	 * tolerated.
	 * See section 3.2 of RFC 7323.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if (((thflags & TH_RST) != 0) || V_tcp_tolerate_missing_ts) {
			if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: Timestamp missing, "
				    "segment processed normally\n",
				    s, __func__);
				free(s, M_TCPLOG);
			}
		} else {
			if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: Timestamp missing, "
				    "segment silently dropped\n", s, __func__);
				free(s, M_TCPLOG);
			}
			goto drop;
		}
	}
	/*
	 * If timestamps were not negotiated during SYN/ACK and a
	 * segment with a timestamp is received, ignore the
	 * timestamp and process the packet normally.
	 * See section 3.2 of RFC 7323.
	 */
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "segment processed normally\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    SEGQ_EMPTY(tp) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	    TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {
		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery without timestamps.
				 */
				if ((to.to_flags & TOF_TS) == 0 &&
				    tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    tp->t_badrxtwin != 0 &&
				    TSTMP_LT(ticks, tp->t_badrxtwin)) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					uint32_t t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				acked = BYTES_THIS_ACK(tp, th);

#ifdef TCP_HHOOK
				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);
#endif

				TCPSTAT_ADD(tcps_rcvackpack, nsegs);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, nsegs, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
				TCP_PROBE3(debug__input, tp, th, m);
				/*
				 * Clear t_acktime if remote side has ACKd
				 * all data in the socket buffer.
1836 * Otherwise, update t_acktime if we received 1837 * a sufficiently large ACK. 1838 */ 1839 if (sbavail(&so->so_snd) == 0) 1840 tp->t_acktime = 0; 1841 else if (acked > 1) 1842 tp->t_acktime = ticks; 1843 if (tp->snd_una == tp->snd_max) 1844 tcp_timer_activate(tp, TT_REXMT, 0); 1845 else if (!tcp_timer_active(tp, TT_PERSIST)) 1846 tcp_timer_activate(tp, TT_REXMT, 1847 TP_RXTCUR(tp)); 1848 sowwakeup(so); 1849 if (sbavail(&so->so_snd)) 1850 (void) tcp_output(tp); 1851 goto check_delack; 1852 } 1853 } else if (th->th_ack == tp->snd_una && 1854 tlen <= sbspace(&so->so_rcv)) { 1855 int newsize = 0; /* automatic sockbuf scaling */ 1856 1857 /* 1858 * This is a pure, in-sequence data packet with 1859 * nothing on the reassembly queue and we have enough 1860 * buffer space to take it. 1861 */ 1862 /* Clean receiver SACK report if present */ 1863 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1864 tcp_clean_sackreport(tp); 1865 TCPSTAT_INC(tcps_preddat); 1866 tp->rcv_nxt += tlen; 1867 if (tlen && 1868 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 1869 (tp->t_fbyte_in == 0)) { 1870 tp->t_fbyte_in = ticks; 1871 if (tp->t_fbyte_in == 0) 1872 tp->t_fbyte_in = 1; 1873 if (tp->t_fbyte_out && tp->t_fbyte_in) 1874 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 1875 } 1876 /* 1877 * Pull snd_wl1 up to prevent seq wrap relative to 1878 * th_seq. 1879 */ 1880 tp->snd_wl1 = th->th_seq; 1881 /* 1882 * Pull rcv_up up to prevent seq wrap relative to 1883 * rcv_nxt. 1884 */ 1885 tp->rcv_up = tp->rcv_nxt; 1886 TCPSTAT_ADD(tcps_rcvpack, nsegs); 1887 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1888 TCP_PROBE3(debug__input, tp, th, m); 1889 1890 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 1891 1892 /* Add data to socket buffer. */ 1893 SOCKBUF_LOCK(&so->so_rcv); 1894 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1895 m_freem(m); 1896 } else { 1897 /* 1898 * Set new socket buffer size. 1899 * Give up when limit is reached. 1900 */ 1901 if (newsize) 1902 if (!sbreserve_locked(so, SO_RCV, 1903 newsize, NULL)) 1904 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1905 m_adj(m, drop_hdrlen); /* delayed header drop */ 1906 sbappendstream_locked(&so->so_rcv, m, 0); 1907 } 1908 /* NB: sorwakeup_locked() does an implicit unlock. */ 1909 sorwakeup_locked(so); 1910 if (DELAY_ACK(tp, tlen)) { 1911 tp->t_flags |= TF_DELACK; 1912 } else { 1913 tp->t_flags |= TF_ACKNOW; 1914 tcp_output(tp); 1915 } 1916 goto check_delack; 1917 } 1918 } 1919 1920 /* 1921 * Calculate amount of space in receive window, 1922 * and then do TCP input processing. 1923 * Receive window is amount of space in rcv queue, 1924 * but not less than advertised window. 1925 */ 1926 win = sbspace(&so->so_rcv); 1927 if (win < 0) 1928 win = 0; 1929 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1930 1931 switch (tp->t_state) { 1932 /* 1933 * If the state is SYN_RECEIVED: 1934 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1935 */ 1936 case TCPS_SYN_RECEIVED: 1937 if ((thflags & TH_ACK) && 1938 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1939 SEQ_GT(th->th_ack, tp->snd_max))) { 1940 rstreason = BANDLIM_RST_OPENPORT; 1941 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 1942 goto dropwithreset; 1943 } 1944 if (IS_FASTOPEN(tp->t_flags)) { 1945 /* 1946 * When a TFO connection is in SYN_RECEIVED, the 1947 * only valid packets are the initial SYN, a 1948 * retransmit/copy of the initial SYN (possibly with 1949 * a subset of the original data), a valid ACK, a 1950 * FIN, or a RST. 
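 * The checks below enforce this: a segment carrying both SYN
 * and ACK is answered with a RST, a retransmitted SYN is
 * silently dropped while the delayed-ACK or retransmit timer
 * is still pending, and a segment carrying none of ACK, FIN or
 * RST is dropped as well.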
1951 */ 1952 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1953 rstreason = BANDLIM_RST_OPENPORT; 1954 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 1955 goto dropwithreset; 1956 } else if (thflags & TH_SYN) { 1957 /* non-initial SYN is ignored */ 1958 if ((tcp_timer_active(tp, TT_DELACK) || 1959 tcp_timer_active(tp, TT_REXMT))) 1960 goto drop; 1961 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1962 goto drop; 1963 } 1964 } 1965 break; 1966 1967 /* 1968 * If the state is SYN_SENT: 1969 * if seg contains a RST with valid ACK (SEQ.ACK has already 1970 * been verified), then drop the connection. 1971 * if seg contains a RST without an ACK, drop the seg. 1972 * if seg does not contain SYN, then drop the seg. 1973 * Otherwise this is an acceptable SYN segment 1974 * initialize tp->rcv_nxt and tp->irs 1975 * if seg contains ack then advance tp->snd_una 1976 * if seg contains an ECE and ECN support is enabled, the stream 1977 * is ECN capable. 1978 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1979 * arrange for segment to be acked (eventually) 1980 * continue processing rest of data/controls, beginning with URG 1981 */ 1982 case TCPS_SYN_SENT: 1983 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1984 TCP_PROBE5(connect__refused, NULL, tp, 1985 m, tp, th); 1986 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 1987 tp = tcp_drop(tp, ECONNREFUSED); 1988 } 1989 if (thflags & TH_RST) 1990 goto drop; 1991 if (!(thflags & TH_SYN)) 1992 goto drop; 1993 1994 tp->irs = th->th_seq; 1995 tcp_rcvseqinit(tp); 1996 if (thflags & TH_ACK) { 1997 int tfo_partial_ack = 0; 1998 1999 TCPSTAT_INC(tcps_connects); 2000 soisconnected(so); 2001 #ifdef MAC 2002 mac_socketpeer_set_from_mbuf(m, so); 2003 #endif 2004 /* Do window scaling on this connection? */ 2005 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2006 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2007 tp->rcv_scale = tp->request_r_scale; 2008 } 2009 tp->rcv_adv += min(tp->rcv_wnd, 2010 TCP_MAXWIN << tp->rcv_scale); 2011 tp->snd_una++; /* SYN is acked */ 2012 /* 2013 * If not all the data that was sent in the TFO SYN 2014 * has been acked, resend the remainder right away. 2015 */ 2016 if (IS_FASTOPEN(tp->t_flags) && 2017 (tp->snd_una != tp->snd_max)) { 2018 tp->snd_nxt = th->th_ack; 2019 tfo_partial_ack = 1; 2020 } 2021 /* 2022 * If there's data, delay ACK; if there's also a FIN 2023 * ACKNOW will be turned on later. 2024 */ 2025 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack) 2026 tcp_timer_activate(tp, TT_DELACK, 2027 tcp_delacktime); 2028 else 2029 tp->t_flags |= TF_ACKNOW; 2030 2031 tcp_ecn_input_syn_sent(tp, thflags, iptos); 2032 2033 /* 2034 * Received <SYN,ACK> in SYN_SENT[*] state. 2035 * Transitions: 2036 * SYN_SENT --> ESTABLISHED 2037 * SYN_SENT* --> FIN_WAIT_1 2038 */ 2039 tp->t_starttime = ticks; 2040 if (tp->t_flags & TF_NEEDFIN) { 2041 tp->t_acktime = ticks; 2042 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2043 tp->t_flags &= ~TF_NEEDFIN; 2044 thflags &= ~TH_SYN; 2045 } else { 2046 tcp_state_change(tp, TCPS_ESTABLISHED); 2047 TCP_PROBE5(connect__established, NULL, tp, 2048 m, tp, th); 2049 cc_conn_init(tp); 2050 tcp_timer_activate(tp, TT_KEEP, 2051 TP_KEEPIDLE(tp)); 2052 } 2053 } else { 2054 /* 2055 * Received initial SYN in SYN-SENT[*] state => 2056 * simultaneous open. 2057 * If it succeeds, connection is * half-synchronized. 
2058 * Otherwise, do 3-way handshake: 2059 * SYN-SENT -> SYN-RECEIVED 2060 * SYN-SENT* -> SYN-RECEIVED* 2061 */ 2062 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 2063 tcp_timer_activate(tp, TT_REXMT, 0); 2064 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2065 } 2066 2067 /* 2068 * Advance th->th_seq to correspond to first data byte. 2069 * If data, trim to stay within window, 2070 * dropping FIN if necessary. 2071 */ 2072 th->th_seq++; 2073 if (tlen > tp->rcv_wnd) { 2074 todrop = tlen - tp->rcv_wnd; 2075 m_adj(m, -todrop); 2076 tlen = tp->rcv_wnd; 2077 thflags &= ~TH_FIN; 2078 TCPSTAT_INC(tcps_rcvpackafterwin); 2079 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2080 } 2081 tp->snd_wl1 = th->th_seq - 1; 2082 tp->rcv_up = th->th_seq; 2083 /* 2084 * Client side of transaction: already sent SYN and data. 2085 * If the remote host used T/TCP to validate the SYN, 2086 * our data will be ACK'd; if so, enter normal data segment 2087 * processing in the middle of step 5, ack processing. 2088 * Otherwise, goto step 6. 2089 */ 2090 if (thflags & TH_ACK) 2091 goto process_ACK; 2092 2093 goto step6; 2094 } 2095 2096 /* 2097 * States other than LISTEN or SYN_SENT. 2098 * First check the RST flag and sequence number since reset segments 2099 * are exempt from the timestamp and connection count tests. This 2100 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2101 * below which allowed reset segments in half the sequence space 2102 * to fall though and be processed (which gives forged reset 2103 * segments with a random sequence number a 50 percent chance of 2104 * killing a connection). 2105 * Then check timestamp, if present. 2106 * Then check the connection count, if present. 2107 * Then check that at least some bytes of segment are within 2108 * receive window. If segment begins before rcv_nxt, 2109 * drop leading data (and SYN); if nothing left, just ack. 2110 */ 2111 if (thflags & TH_RST) { 2112 /* 2113 * RFC5961 Section 3.2 2114 * 2115 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2116 * - If RST is in window, we send challenge ACK. 2117 * 2118 * Note: to take into account delayed ACKs, we should 2119 * test against last_ack_sent instead of rcv_nxt. 2120 * Note 2: we handle special case of closed window, not 2121 * covered by the RFC. 2122 */ 2123 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2124 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2125 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2126 KASSERT(tp->t_state != TCPS_SYN_SENT, 2127 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2128 __func__, th, tp)); 2129 2130 if (V_tcp_insecure_rst || 2131 tp->last_ack_sent == th->th_seq) { 2132 TCPSTAT_INC(tcps_drops); 2133 /* Drop the connection. */ 2134 switch (tp->t_state) { 2135 case TCPS_SYN_RECEIVED: 2136 so->so_error = ECONNREFUSED; 2137 goto close; 2138 case TCPS_ESTABLISHED: 2139 case TCPS_FIN_WAIT_1: 2140 case TCPS_FIN_WAIT_2: 2141 case TCPS_CLOSE_WAIT: 2142 case TCPS_CLOSING: 2143 case TCPS_LAST_ACK: 2144 so->so_error = ECONNRESET; 2145 close: 2146 /* FALLTHROUGH */ 2147 default: 2148 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_RST); 2149 tp = tcp_close(tp); 2150 } 2151 } else { 2152 TCPSTAT_INC(tcps_badrst); 2153 /* Send challenge ACK. */ 2154 tcp_respond(tp, mtod(m, void *), th, m, 2155 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2156 tp->last_ack_sent = tp->rcv_nxt; 2157 m = NULL; 2158 } 2159 } 2160 goto drop; 2161 } 2162 2163 /* 2164 * RFC5961 Section 4.2 2165 * Send challenge ACK for any SYN in synchronized state. 
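 * Answering with a challenge ACK rather than tearing the
 * connection down means an off-path attacker who blindly
 * injects a SYN learns nothing, while a legitimate peer that
 * really did restart will respond with a RST carrying the
 * correct sequence number.  The pre-RFC5961 behaviour (reset
 * on any in-window SYN) can still be selected through
 * V_tcp_insecure_syn, handled below.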
2166 */ 2167 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT && 2168 tp->t_state != TCPS_SYN_RECEIVED) { 2169 TCPSTAT_INC(tcps_badsyn); 2170 if (V_tcp_insecure_syn && 2171 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2172 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2173 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 2174 tp = tcp_drop(tp, ECONNRESET); 2175 rstreason = BANDLIM_UNLIMITED; 2176 } else { 2177 tcp_ecn_input_syn_sent(tp, thflags, iptos); 2178 /* Send challenge ACK. */ 2179 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2180 tp->snd_nxt, TH_ACK); 2181 tp->last_ack_sent = tp->rcv_nxt; 2182 m = NULL; 2183 } 2184 goto drop; 2185 } 2186 2187 /* 2188 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2189 * and it's less than ts_recent, drop it. 2190 */ 2191 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2192 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2193 /* Check to see if ts_recent is over 24 days old. */ 2194 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2195 /* 2196 * Invalidate ts_recent. If this segment updates 2197 * ts_recent, the age will be reset later and ts_recent 2198 * will get a valid value. If it does not, setting 2199 * ts_recent to zero will at least satisfy the 2200 * requirement that zero be placed in the timestamp 2201 * echo reply when ts_recent isn't valid. The 2202 * age isn't reset until we get a valid ts_recent 2203 * because we don't want out-of-order segments to be 2204 * dropped when ts_recent is old. 2205 */ 2206 tp->ts_recent = 0; 2207 } else { 2208 TCPSTAT_INC(tcps_rcvduppack); 2209 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2210 TCPSTAT_INC(tcps_pawsdrop); 2211 if (tlen) 2212 goto dropafterack; 2213 goto drop; 2214 } 2215 } 2216 2217 /* 2218 * In the SYN-RECEIVED state, validate that the packet belongs to 2219 * this connection before trimming the data to fit the receive 2220 * window. Check the sequence number versus IRS since we know 2221 * the sequence numbers haven't wrapped. This is a partial fix 2222 * for the "LAND" DoS attack. 2223 */ 2224 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2225 rstreason = BANDLIM_RST_OPENPORT; 2226 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 2227 goto dropwithreset; 2228 } 2229 2230 todrop = tp->rcv_nxt - th->th_seq; 2231 if (todrop > 0) { 2232 if (thflags & TH_SYN) { 2233 thflags &= ~TH_SYN; 2234 th->th_seq++; 2235 if (th->th_urp > 1) 2236 th->th_urp--; 2237 else 2238 thflags &= ~TH_URG; 2239 todrop--; 2240 } 2241 /* 2242 * Following if statement from Stevens, vol. 2, p. 960. 2243 */ 2244 if (todrop > tlen 2245 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2246 /* 2247 * Any valid FIN must be to the left of the window. 2248 * At this point the FIN must be a duplicate or out 2249 * of sequence; drop it. 2250 */ 2251 thflags &= ~TH_FIN; 2252 2253 /* 2254 * Send an ACK to resynchronize and drop any data. 2255 * But keep on processing for RST or ACK. 
2256 */ 2257 tp->t_flags |= TF_ACKNOW; 2258 todrop = tlen; 2259 TCPSTAT_INC(tcps_rcvduppack); 2260 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2261 } else { 2262 TCPSTAT_INC(tcps_rcvpartduppack); 2263 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2264 } 2265 /* 2266 * DSACK - add SACK block for dropped range 2267 */ 2268 if ((todrop > 0) && (tp->t_flags & TF_SACK_PERMIT)) { 2269 tcp_update_sack_list(tp, th->th_seq, 2270 th->th_seq + todrop); 2271 /* 2272 * ACK now, as the next in-sequence segment 2273 * will clear the DSACK block again 2274 */ 2275 tp->t_flags |= TF_ACKNOW; 2276 } 2277 drop_hdrlen += todrop; /* drop from the top afterwards */ 2278 th->th_seq += todrop; 2279 tlen -= todrop; 2280 if (th->th_urp > todrop) 2281 th->th_urp -= todrop; 2282 else { 2283 thflags &= ~TH_URG; 2284 th->th_urp = 0; 2285 } 2286 } 2287 2288 /* 2289 * If new data are received on a connection after the 2290 * user processes are gone, then RST the other end. 2291 */ 2292 if ((tp->t_flags & TF_CLOSED) && tlen) { 2293 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2294 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2295 "after socket was closed, " 2296 "sending RST and removing tcpcb\n", 2297 s, __func__, tcpstates[tp->t_state], tlen); 2298 free(s, M_TCPLOG); 2299 } 2300 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 2301 /* tcp_close will kill the inp pre-log the Reset */ 2302 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 2303 tp = tcp_close(tp); 2304 TCPSTAT_INC(tcps_rcvafterclose); 2305 rstreason = BANDLIM_UNLIMITED; 2306 goto dropwithreset; 2307 } 2308 2309 /* 2310 * If segment ends after window, drop trailing data 2311 * (and PUSH and FIN); if nothing left, just ACK. 2312 */ 2313 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2314 if (todrop > 0) { 2315 TCPSTAT_INC(tcps_rcvpackafterwin); 2316 if (todrop >= tlen) { 2317 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2318 /* 2319 * If window is closed can only take segments at 2320 * window edge, and have to drop data and PUSH from 2321 * incoming segments. Continue processing, but 2322 * remember to ack. Otherwise, drop segment 2323 * and ack. 2324 */ 2325 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2326 tp->t_flags |= TF_ACKNOW; 2327 TCPSTAT_INC(tcps_rcvwinprobe); 2328 } else 2329 goto dropafterack; 2330 } else 2331 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2332 m_adj(m, -todrop); 2333 tlen -= todrop; 2334 thflags &= ~(TH_PUSH|TH_FIN); 2335 } 2336 2337 /* 2338 * If last ACK falls within this segment's sequence numbers, 2339 * record its timestamp. 2340 * NOTE: 2341 * 1) That the test incorporates suggestions from the latest 2342 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2343 * 2) That updating only on newer timestamps interferes with 2344 * our earlier PAWS tests, so this check should be solely 2345 * predicated on the sequence space of this segment. 2346 * 3) That we modify the segment boundary check to be 2347 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2348 * instead of RFC1323's 2349 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2350 * This modified check allows us to overcome RFC1323's 2351 * limitations as described in Stevens TCP/IP Illustrated 2352 * Vol. 2 p.869. In such cases, we can still calculate the 2353 * RTT correctly when RCV.NXT == Last.ACK.Sent. 
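 * In particular, a segment with SEG.LEN == 0 that sits exactly
 * at Last.ACK.Sent still satisfies the modified check, which is
 * the situation item 3 above is concerned with.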
2354 */ 2355 if ((to.to_flags & TOF_TS) != 0 && 2356 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2357 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2358 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2359 tp->ts_recent_age = tcp_ts_getticks(); 2360 tp->ts_recent = to.to_tsval; 2361 } 2362 2363 /* 2364 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2365 * flag is on (half-synchronized state), then queue data for 2366 * later processing; else drop segment and return. 2367 */ 2368 if ((thflags & TH_ACK) == 0) { 2369 if (tp->t_state == TCPS_SYN_RECEIVED || 2370 (tp->t_flags & TF_NEEDSYN)) { 2371 if (tp->t_state == TCPS_SYN_RECEIVED && 2372 IS_FASTOPEN(tp->t_flags)) { 2373 tp->snd_wnd = tiwin; 2374 cc_conn_init(tp); 2375 } 2376 goto step6; 2377 } else if (tp->t_flags & TF_ACKNOW) 2378 goto dropafterack; 2379 else 2380 goto drop; 2381 } 2382 2383 /* 2384 * Ack processing. 2385 */ 2386 switch (tp->t_state) { 2387 /* 2388 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2389 * ESTABLISHED state and continue processing. 2390 * The ACK was checked above. 2391 */ 2392 case TCPS_SYN_RECEIVED: 2393 2394 TCPSTAT_INC(tcps_connects); 2395 if (tp->t_flags & TF_SONOTCONN) { 2396 /* 2397 * Usually SYN_RECEIVED had been created from a LISTEN, 2398 * and solisten_enqueue() has already marked the socket 2399 * layer as connected. If it didn't, which can happen 2400 * only with an accept_filter(9), then the tp is marked 2401 * with TF_SONOTCONN. The other reason for this mark 2402 * to be set is a simultaneous open, a SYN_RECEIVED 2403 * that had been created from SYN_SENT. 2404 */ 2405 tp->t_flags &= ~TF_SONOTCONN; 2406 soisconnected(so); 2407 } 2408 /* Do window scaling? */ 2409 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2410 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2411 tp->rcv_scale = tp->request_r_scale; 2412 } 2413 tp->snd_wnd = tiwin; 2414 /* 2415 * Make transitions: 2416 * SYN-RECEIVED -> ESTABLISHED 2417 * SYN-RECEIVED* -> FIN-WAIT-1 2418 */ 2419 tp->t_starttime = ticks; 2420 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 2421 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2422 tp->t_tfo_pending = NULL; 2423 } 2424 if (tp->t_flags & TF_NEEDFIN) { 2425 tp->t_acktime = ticks; 2426 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2427 tp->t_flags &= ~TF_NEEDFIN; 2428 } else { 2429 tcp_state_change(tp, TCPS_ESTABLISHED); 2430 TCP_PROBE5(accept__established, NULL, tp, 2431 m, tp, th); 2432 /* 2433 * TFO connections call cc_conn_init() during SYN 2434 * processing. Calling it again here for such 2435 * connections is not harmless as it would undo the 2436 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2437 * is retransmitted. 2438 */ 2439 if (!IS_FASTOPEN(tp->t_flags)) 2440 cc_conn_init(tp); 2441 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2442 } 2443 /* 2444 * Account for the ACK of our SYN prior to 2445 * regular ACK processing below, except for 2446 * simultaneous SYN, which is handled later. 2447 */ 2448 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 2449 incforsyn = 1; 2450 /* 2451 * If segment contains data or ACK, will call tcp_reass() 2452 * later; if not, do so now to pass queued data to user. 2453 */ 2454 if (tlen == 0 && (thflags & TH_FIN) == 0) { 2455 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 2456 (struct mbuf *)0); 2457 tcp_handle_wakeup(tp); 2458 } 2459 tp->snd_wl1 = th->th_seq - 1; 2460 /* FALLTHROUGH */ 2461 2462 /* 2463 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2464 * ACKs. 
If the ack is in the range 2465 * tp->snd_una < th->th_ack <= tp->snd_max 2466 * then advance tp->snd_una to th->th_ack and drop 2467 * data from the retransmission queue. If this ACK reflects 2468 * more up to date window information we update our window information. 2469 */ 2470 case TCPS_ESTABLISHED: 2471 case TCPS_FIN_WAIT_1: 2472 case TCPS_FIN_WAIT_2: 2473 case TCPS_CLOSE_WAIT: 2474 case TCPS_CLOSING: 2475 case TCPS_LAST_ACK: 2476 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2477 TCPSTAT_INC(tcps_rcvacktoomuch); 2478 goto dropafterack; 2479 } 2480 if (tcp_is_sack_recovery(tp, &to)) { 2481 if (((sack_changed = tcp_sack_doack(tp, &to, th->th_ack)) != 0) && 2482 (tp->t_flags & TF_LRD)) { 2483 tcp_sack_lost_retransmission(tp, th); 2484 } 2485 } else 2486 /* 2487 * Reset the value so that previous (valid) value 2488 * from the last ack with SACK doesn't get used. 2489 */ 2490 tp->sackhint.sacked_bytes = 0; 2491 2492 #ifdef TCP_HHOOK 2493 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2494 hhook_run_tcp_est_in(tp, th, &to); 2495 #endif 2496 2497 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2498 maxseg = tcp_maxseg(tp); 2499 if (tlen == 0 && 2500 (tiwin == tp->snd_wnd || 2501 (tp->t_flags & TF_SACK_PERMIT))) { 2502 /* 2503 * If this is the first time we've seen a 2504 * FIN from the remote, this is not a 2505 * duplicate and it needs to be processed 2506 * normally. This happens during a 2507 * simultaneous close. 2508 */ 2509 if ((thflags & TH_FIN) && 2510 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2511 tp->t_dupacks = 0; 2512 break; 2513 } 2514 TCPSTAT_INC(tcps_rcvdupack); 2515 /* 2516 * If we have outstanding data (other than 2517 * a window probe), this is a completely 2518 * duplicate ack (ie, window info didn't 2519 * change and FIN isn't set), 2520 * the ack is the biggest we've 2521 * seen and we've seen exactly our rexmt 2522 * threshold of them, assume a packet 2523 * has been dropped and retransmit it. 2524 * Kludge snd_nxt & the congestion 2525 * window so we send only this one 2526 * packet. 2527 * 2528 * We know we're losing at the current 2529 * window size so do congestion avoidance 2530 * (set ssthresh to half the current window 2531 * and pull our congestion window back to 2532 * the new ssthresh). 2533 * 2534 * Dup acks mean that packets have left the 2535 * network (they're now cached at the receiver) 2536 * so bump cwnd by the amount in the receiver 2537 * to keep a constant cwnd packets in the 2538 * network. 2539 * 2540 * When using TCP ECN, notify the peer that 2541 * we reduced the cwnd. 2542 */ 2543 /* 2544 * Following 2 kinds of acks should not affect 2545 * dupack counting: 2546 * 1) Old acks 2547 * 2) Acks with SACK but without any new SACK 2548 * information in them. These could result from 2549 * any anomaly in the network like a switch 2550 * duplicating packets or a possible DoS attack. 2551 */ 2552 if (th->th_ack != tp->snd_una || 2553 (tcp_is_sack_recovery(tp, &to) && 2554 !sack_changed)) 2555 break; 2556 else if (!tcp_timer_active(tp, TT_REXMT)) 2557 tp->t_dupacks = 0; 2558 else if (++tp->t_dupacks > tcprexmtthresh || 2559 IN_FASTRECOVERY(tp->t_flags)) { 2560 cc_ack_received(tp, th, nsegs, 2561 CC_DUPACK); 2562 if (V_tcp_do_prr && 2563 IN_FASTRECOVERY(tp->t_flags)) { 2564 tcp_do_prr_ack(tp, th, &to); 2565 } else if (tcp_is_sack_recovery(tp, &to) && 2566 IN_FASTRECOVERY(tp->t_flags)) { 2567 int awnd; 2568 2569 /* 2570 * Compute the amount of data in flight first. 
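 * (tcp_compute_pipe() when V_tcp_do_newsack is enabled,
 * otherwise the older snd_nxt - snd_fack + sack_bytes_rexmit
 * estimate, as chosen just below.)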
2571 * We can inject new data into the pipe iff 2572 * we have less than 1/2 the original window's 2573 * worth of data in flight. 2574 */ 2575 if (V_tcp_do_newsack) 2576 awnd = tcp_compute_pipe(tp); 2577 else 2578 awnd = (tp->snd_nxt - tp->snd_fack) + 2579 tp->sackhint.sack_bytes_rexmit; 2580 2581 if (awnd < tp->snd_ssthresh) { 2582 tp->snd_cwnd += maxseg; 2583 if (tp->snd_cwnd > tp->snd_ssthresh) 2584 tp->snd_cwnd = tp->snd_ssthresh; 2585 } 2586 } else 2587 tp->snd_cwnd += maxseg; 2588 (void) tcp_output(tp); 2589 goto drop; 2590 } else if (tp->t_dupacks == tcprexmtthresh || 2591 (tp->t_flags & TF_SACK_PERMIT && 2592 V_tcp_do_newsack && 2593 tp->sackhint.sacked_bytes > 2594 (tcprexmtthresh - 1) * maxseg)) { 2595 enter_recovery: 2596 /* 2597 * Above is the RFC6675 trigger condition of 2598 * more than (dupthresh-1)*maxseg sacked data. 2599 * If the count of holes in the 2600 * scoreboard is >= dupthresh, we could 2601 * also enter loss recovery, but don't 2602 * have that value readily available. 2603 */ 2604 tp->t_dupacks = tcprexmtthresh; 2605 tcp_seq onxt = tp->snd_nxt; 2606 2607 /* 2608 * If we're doing sack, or prr, check 2609 * to see if we're already in sack 2610 * recovery. If we're not doing sack, 2611 * check to see if we're in newreno 2612 * recovery. 2613 */ 2614 if (V_tcp_do_prr || 2615 (tp->t_flags & TF_SACK_PERMIT)) { 2616 if (IN_FASTRECOVERY(tp->t_flags)) { 2617 tp->t_dupacks = 0; 2618 break; 2619 } 2620 } else { 2621 if (SEQ_LEQ(th->th_ack, 2622 tp->snd_recover)) { 2623 tp->t_dupacks = 0; 2624 break; 2625 } 2626 } 2627 /* Congestion signal before ack. */ 2628 cc_cong_signal(tp, th, CC_NDUPACK); 2629 cc_ack_received(tp, th, nsegs, 2630 CC_DUPACK); 2631 tcp_timer_activate(tp, TT_REXMT, 0); 2632 tp->t_rtttime = 0; 2633 if (V_tcp_do_prr) { 2634 /* 2635 * snd_ssthresh is already updated by 2636 * cc_cong_signal. 2637 */ 2638 if (tcp_is_sack_recovery(tp, &to)) { 2639 tp->sackhint.prr_delivered = 2640 tp->sackhint.sacked_bytes; 2641 } else { 2642 tp->sackhint.prr_delivered = 2643 imin(tp->snd_max - tp->snd_una, 2644 imin(INT_MAX / 65536, 2645 tp->t_dupacks) * maxseg); 2646 } 2647 tp->sackhint.recover_fs = max(1, 2648 tp->snd_nxt - tp->snd_una); 2649 } 2650 if (tcp_is_sack_recovery(tp, &to)) { 2651 TCPSTAT_INC( 2652 tcps_sack_recovery_episode); 2653 tp->snd_recover = tp->snd_nxt; 2654 tp->snd_cwnd = maxseg; 2655 (void) tcp_output(tp); 2656 if (SEQ_GT(th->th_ack, tp->snd_una)) 2657 goto resume_partialack; 2658 goto drop; 2659 } 2660 tp->snd_nxt = th->th_ack; 2661 tp->snd_cwnd = maxseg; 2662 (void) tcp_output(tp); 2663 KASSERT(tp->snd_limited <= 2, 2664 ("%s: tp->snd_limited too big", 2665 __func__)); 2666 tp->snd_cwnd = tp->snd_ssthresh + 2667 maxseg * 2668 (tp->t_dupacks - tp->snd_limited); 2669 if (SEQ_GT(onxt, tp->snd_nxt)) 2670 tp->snd_nxt = onxt; 2671 goto drop; 2672 } else if (V_tcp_do_rfc3042) { 2673 /* 2674 * Process first and second duplicate 2675 * ACKs. Each indicates a segment 2676 * leaving the network, creating room 2677 * for more. Make sure we can send a 2678 * packet on reception of each duplicate 2679 * ACK by increasing snd_cwnd by one 2680 * segment. Restore the original 2681 * snd_cwnd after packet transmission. 
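 * This is the Limited Transmit algorithm of RFC 3042; the cwnd
 * inflation applied below is undone again right after the
 * tcp_output() call, so it never outlives the transmission it
 * was meant to enable.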
2682 */ 2683 cc_ack_received(tp, th, nsegs, 2684 CC_DUPACK); 2685 uint32_t oldcwnd = tp->snd_cwnd; 2686 tcp_seq oldsndmax = tp->snd_max; 2687 u_int sent; 2688 int avail; 2689 2690 KASSERT(tp->t_dupacks == 1 || 2691 tp->t_dupacks == 2, 2692 ("%s: dupacks not 1 or 2", 2693 __func__)); 2694 if (tp->t_dupacks == 1) 2695 tp->snd_limited = 0; 2696 tp->snd_cwnd = 2697 (tp->snd_nxt - tp->snd_una) + 2698 (tp->t_dupacks - tp->snd_limited) * 2699 maxseg; 2700 /* 2701 * Only call tcp_output when there 2702 * is new data available to be sent 2703 * or we need to send an ACK. 2704 */ 2705 SOCKBUF_LOCK(&so->so_snd); 2706 avail = sbavail(&so->so_snd) - 2707 (tp->snd_nxt - tp->snd_una); 2708 SOCKBUF_UNLOCK(&so->so_snd); 2709 if (avail > 0 || tp->t_flags & TF_ACKNOW) 2710 (void) tcp_output(tp); 2711 sent = tp->snd_max - oldsndmax; 2712 if (sent > maxseg) { 2713 KASSERT((tp->t_dupacks == 2 && 2714 tp->snd_limited == 0) || 2715 (sent == maxseg + 1 && 2716 tp->t_flags & TF_SENTFIN), 2717 ("%s: sent too much", 2718 __func__)); 2719 tp->snd_limited = 2; 2720 } else if (sent > 0) 2721 ++tp->snd_limited; 2722 tp->snd_cwnd = oldcwnd; 2723 goto drop; 2724 } 2725 } 2726 break; 2727 } else { 2728 /* 2729 * This ack is advancing the left edge, reset the 2730 * counter. 2731 */ 2732 tp->t_dupacks = 0; 2733 /* 2734 * If this ack also has new SACK info, increment the 2735 * counter as per rfc6675. The variable 2736 * sack_changed tracks all changes to the SACK 2737 * scoreboard, including when partial ACKs without 2738 * SACK options are received, and clear the scoreboard 2739 * from the left side. Such partial ACKs should not be 2740 * counted as dupacks here. 2741 */ 2742 if (tcp_is_sack_recovery(tp, &to) && 2743 sack_changed) { 2744 tp->t_dupacks++; 2745 /* limit overhead by setting maxseg last */ 2746 if (!IN_FASTRECOVERY(tp->t_flags) && 2747 (tp->sackhint.sacked_bytes > 2748 ((tcprexmtthresh - 1) * 2749 (maxseg = tcp_maxseg(tp))))) { 2750 goto enter_recovery; 2751 } 2752 } 2753 } 2754 2755 resume_partialack: 2756 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2757 ("%s: th_ack <= snd_una", __func__)); 2758 2759 /* 2760 * If the congestion window was inflated to account 2761 * for the other side's cached packets, retract it. 2762 */ 2763 if (IN_FASTRECOVERY(tp->t_flags)) { 2764 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2765 if (tp->t_flags & TF_SACK_PERMIT) 2766 if (V_tcp_do_prr && to.to_flags & TOF_SACK) { 2767 tcp_timer_activate(tp, TT_REXMT, 0); 2768 tp->t_rtttime = 0; 2769 tcp_do_prr_ack(tp, th, &to); 2770 tp->t_flags |= TF_ACKNOW; 2771 (void) tcp_output(tp); 2772 } else 2773 tcp_sack_partialack(tp, th); 2774 else 2775 tcp_newreno_partial_ack(tp, th); 2776 } else 2777 cc_post_recovery(tp, th); 2778 } else if (IN_CONGRECOVERY(tp->t_flags)) { 2779 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2780 if (V_tcp_do_prr) { 2781 tp->sackhint.delivered_data = BYTES_THIS_ACK(tp, th); 2782 tp->snd_fack = th->th_ack; 2783 tcp_do_prr_ack(tp, th, &to); 2784 (void) tcp_output(tp); 2785 } 2786 } else 2787 cc_post_recovery(tp, th); 2788 } 2789 /* 2790 * If we reach this point, ACK is not a duplicate, 2791 * i.e., it ACKs something we sent. 2792 */ 2793 if (tp->t_flags & TF_NEEDSYN) { 2794 /* 2795 * T/TCP: Connection was half-synchronized, and our 2796 * SYN has been ACK'd (so connection is now fully 2797 * synchronized). Go to non-starred state, 2798 * increment snd_una for ACK of SYN, and check if 2799 * we can do window scaling. 2800 */ 2801 tp->t_flags &= ~TF_NEEDSYN; 2802 tp->snd_una++; 2803 /* Do window scaling? 
*/ 2804 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2805 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2806 tp->rcv_scale = tp->request_r_scale; 2807 /* Send window already scaled. */ 2808 } 2809 } 2810 2811 process_ACK: 2812 INP_WLOCK_ASSERT(inp); 2813 2814 /* 2815 * Adjust for the SYN bit in sequence space, 2816 * but don't account for it in cwnd calculations. 2817 * This is for the SYN_RECEIVED, non-simultaneous 2818 * SYN case. SYN_SENT and simultaneous SYN are 2819 * treated elsewhere. 2820 */ 2821 if (incforsyn) 2822 tp->snd_una++; 2823 acked = BYTES_THIS_ACK(tp, th); 2824 KASSERT(acked >= 0, ("%s: acked unexepectedly negative " 2825 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__, 2826 tp->snd_una, th->th_ack, tp, m)); 2827 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 2828 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2829 2830 /* 2831 * If we just performed our first retransmit, and the ACK 2832 * arrives within our recovery window, then it was a mistake 2833 * to do the retransmit in the first place. Recover our 2834 * original cwnd and ssthresh, and proceed to transmit where 2835 * we left off. 2836 */ 2837 if (tp->t_rxtshift == 1 && 2838 tp->t_flags & TF_PREVVALID && 2839 tp->t_badrxtwin != 0 && 2840 to.to_flags & TOF_TS && 2841 to.to_tsecr != 0 && 2842 TSTMP_LT(to.to_tsecr, tp->t_badrxtwin)) 2843 cc_cong_signal(tp, th, CC_RTO_ERR); 2844 2845 /* 2846 * If we have a timestamp reply, update smoothed 2847 * round trip time. If no timestamp is present but 2848 * transmit timer is running and timed sequence 2849 * number was acked, update smoothed round trip time. 2850 * Since we now have an rtt measurement, cancel the 2851 * timer backoff (cf., Phil Karn's retransmit alg.). 2852 * Recompute the initial retransmit timer. 2853 * 2854 * Some boxes send broken timestamp replies 2855 * during the SYN+ACK phase, ignore 2856 * timestamps of 0 or we could calculate a 2857 * huge RTT and blow up the retransmit timer. 2858 */ 2859 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2860 uint32_t t; 2861 2862 t = tcp_ts_getticks() - to.to_tsecr; 2863 if (!tp->t_rttlow || tp->t_rttlow > t) 2864 tp->t_rttlow = t; 2865 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2866 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2867 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2868 tp->t_rttlow = ticks - tp->t_rtttime; 2869 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2870 } 2871 2872 SOCKBUF_LOCK(&so->so_snd); 2873 /* 2874 * Clear t_acktime if remote side has ACKd all data in the 2875 * socket buffer and FIN (if applicable). 2876 * Otherwise, update t_acktime if we received a sufficiently 2877 * large ACK. 2878 */ 2879 if ((tp->t_state <= TCPS_CLOSE_WAIT && 2880 acked == sbavail(&so->so_snd)) || 2881 acked > sbavail(&so->so_snd)) 2882 tp->t_acktime = 0; 2883 else if (acked > 1) 2884 tp->t_acktime = ticks; 2885 2886 /* 2887 * If all outstanding data is acked, stop retransmit 2888 * timer and remember to restart (more output or persist). 2889 * If there is more data to be acked, restart retransmit 2890 * timer, using current (possibly backed-off) value. 2891 */ 2892 if (th->th_ack == tp->snd_max) { 2893 tcp_timer_activate(tp, TT_REXMT, 0); 2894 needoutput = 1; 2895 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2896 tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp)); 2897 2898 /* 2899 * If no data (only SYN) was ACK'd, 2900 * skip rest of ACK processing. 
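 * (After the SYN adjustment above, acked counts data bytes
 * only, so it is zero in that case.)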
2901 */ 2902 if (acked == 0) { 2903 SOCKBUF_UNLOCK(&so->so_snd); 2904 goto step6; 2905 } 2906 2907 /* 2908 * Let the congestion control algorithm update congestion 2909 * control related information. This typically means increasing 2910 * the congestion window. 2911 */ 2912 cc_ack_received(tp, th, nsegs, CC_ACK); 2913 2914 if (acked > sbavail(&so->so_snd)) { 2915 if (tp->snd_wnd >= sbavail(&so->so_snd)) 2916 tp->snd_wnd -= sbavail(&so->so_snd); 2917 else 2918 tp->snd_wnd = 0; 2919 mfree = sbcut_locked(&so->so_snd, 2920 (int)sbavail(&so->so_snd)); 2921 ourfinisacked = 1; 2922 } else { 2923 mfree = sbcut_locked(&so->so_snd, acked); 2924 if (tp->snd_wnd >= (uint32_t) acked) 2925 tp->snd_wnd -= acked; 2926 else 2927 tp->snd_wnd = 0; 2928 ourfinisacked = 0; 2929 } 2930 /* NB: sowwakeup_locked() does an implicit unlock. */ 2931 sowwakeup_locked(so); 2932 m_freem(mfree); 2933 /* Detect una wraparound. */ 2934 if (!IN_RECOVERY(tp->t_flags) && 2935 SEQ_GT(tp->snd_una, tp->snd_recover) && 2936 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2937 tp->snd_recover = th->th_ack - 1; 2938 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2939 if (IN_RECOVERY(tp->t_flags) && 2940 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2941 EXIT_RECOVERY(tp->t_flags); 2942 } 2943 tp->snd_una = th->th_ack; 2944 if (tp->t_flags & TF_SACK_PERMIT) { 2945 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2946 tp->snd_recover = tp->snd_una; 2947 } 2948 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2949 tp->snd_nxt = tp->snd_una; 2950 2951 switch (tp->t_state) { 2952 /* 2953 * In FIN_WAIT_1 STATE in addition to the processing 2954 * for the ESTABLISHED state if our FIN is now acknowledged 2955 * then enter FIN_WAIT_2. 2956 */ 2957 case TCPS_FIN_WAIT_1: 2958 if (ourfinisacked) { 2959 /* 2960 * If we can't receive any more 2961 * data, then closing user can proceed. 2962 * Starting the timer is contrary to the 2963 * specification, but if we don't get a FIN 2964 * we'll hang forever. 2965 */ 2966 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2967 soisdisconnected(so); 2968 tcp_timer_activate(tp, TT_2MSL, 2969 (tcp_fast_finwait2_recycle ? 2970 tcp_finwait2_timeout : 2971 TP_MAXIDLE(tp))); 2972 } 2973 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2974 } 2975 break; 2976 2977 /* 2978 * In CLOSING STATE in addition to the processing for 2979 * the ESTABLISHED state if the ACK acknowledges our FIN 2980 * then enter the TIME-WAIT state, otherwise ignore 2981 * the segment. 2982 */ 2983 case TCPS_CLOSING: 2984 if (ourfinisacked) { 2985 tcp_twstart(tp); 2986 m_freem(m); 2987 return; 2988 } 2989 break; 2990 2991 /* 2992 * In LAST_ACK, we may still be waiting for data to drain 2993 * and/or to be acked, as well as for the ack of our FIN. 2994 * If our FIN is now acknowledged, delete the TCB, 2995 * enter the closed state and return. 2996 */ 2997 case TCPS_LAST_ACK: 2998 if (ourfinisacked) { 2999 tp = tcp_close(tp); 3000 goto drop; 3001 } 3002 break; 3003 } 3004 } 3005 3006 step6: 3007 INP_WLOCK_ASSERT(inp); 3008 3009 /* 3010 * Update window information. 3011 * Don't look at window if no ACK: TAC's send garbage on first SYN. 
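 * The snd_wl1/snd_wl2 test below accepts a window update only
 * from a segment at least as recent, by sequence number or by
 * ack for the same sequence, as the segment that last updated
 * snd_wnd; this keeps old, reordered segments from overwriting
 * the current send window.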
3012 */ 3013 if ((thflags & TH_ACK) && 3014 (SEQ_LT(tp->snd_wl1, th->th_seq) || 3015 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 3016 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 3017 /* keep track of pure window updates */ 3018 if (tlen == 0 && 3019 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 3020 TCPSTAT_INC(tcps_rcvwinupd); 3021 tp->snd_wnd = tiwin; 3022 tp->snd_wl1 = th->th_seq; 3023 tp->snd_wl2 = th->th_ack; 3024 if (tp->snd_wnd > tp->max_sndwnd) 3025 tp->max_sndwnd = tp->snd_wnd; 3026 needoutput = 1; 3027 } 3028 3029 /* 3030 * Process segments with URG. 3031 */ 3032 if ((thflags & TH_URG) && th->th_urp && 3033 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3034 /* 3035 * This is a kludge, but if we receive and accept 3036 * random urgent pointers, we'll crash in 3037 * soreceive. It's hard to imagine someone 3038 * actually wanting to send this much urgent data. 3039 */ 3040 SOCKBUF_LOCK(&so->so_rcv); 3041 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 3042 th->th_urp = 0; /* XXX */ 3043 thflags &= ~TH_URG; /* XXX */ 3044 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 3045 goto dodata; /* XXX */ 3046 } 3047 /* 3048 * If this segment advances the known urgent pointer, 3049 * then mark the data stream. This should not happen 3050 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 3051 * a FIN has been received from the remote side. 3052 * In these states we ignore the URG. 3053 * 3054 * According to RFC961 (Assigned Protocols), 3055 * the urgent pointer points to the last octet 3056 * of urgent data. We continue, however, 3057 * to consider it to indicate the first octet 3058 * of data past the urgent section as the original 3059 * spec states (in one of two places). 3060 */ 3061 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 3062 tp->rcv_up = th->th_seq + th->th_urp; 3063 so->so_oobmark = sbavail(&so->so_rcv) + 3064 (tp->rcv_up - tp->rcv_nxt) - 1; 3065 if (so->so_oobmark == 0) 3066 so->so_rcv.sb_state |= SBS_RCVATMARK; 3067 sohasoutofband(so); 3068 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 3069 } 3070 SOCKBUF_UNLOCK(&so->so_rcv); 3071 /* 3072 * Remove out of band data so doesn't get presented to user. 3073 * This can happen independent of advancing the URG pointer, 3074 * but if two URG's are pending at once, some out-of-band 3075 * data may creep in... ick. 3076 */ 3077 if (th->th_urp <= (uint32_t)tlen && 3078 !(so->so_options & SO_OOBINLINE)) { 3079 /* hdr drop is delayed */ 3080 tcp_pulloutofband(so, th, m, drop_hdrlen); 3081 } 3082 } else { 3083 /* 3084 * If no out of band data is expected, 3085 * pull receive urgent pointer along 3086 * with the receive window. 3087 */ 3088 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 3089 tp->rcv_up = tp->rcv_nxt; 3090 } 3091 dodata: /* XXX */ 3092 INP_WLOCK_ASSERT(inp); 3093 3094 /* 3095 * Process the segment text, merging it into the TCP sequencing queue, 3096 * and arranging for acknowledgment of receipt if necessary. 3097 * This process logically involves adjusting tp->rcv_wnd as data 3098 * is presented to the user (this happens in tcp_usrreq.c, 3099 * case PRU_RCVD). If a FIN has already been received on this 3100 * connection then we just ignore the text. 
3101 */ 3102 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 3103 IS_FASTOPEN(tp->t_flags)); 3104 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 3105 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3106 tcp_seq save_start = th->th_seq; 3107 tcp_seq save_rnxt = tp->rcv_nxt; 3108 int save_tlen = tlen; 3109 m_adj(m, drop_hdrlen); /* delayed header drop */ 3110 /* 3111 * Insert segment which includes th into TCP reassembly queue 3112 * with control block tp. Set thflags to whether reassembly now 3113 * includes a segment with FIN. This handles the common case 3114 * inline (segment is the next to be received on an established 3115 * connection, and the queue is empty), avoiding linkage into 3116 * and removal from the queue and repetition of various 3117 * conversions. 3118 * Set DELACK for segments received in order, but ack 3119 * immediately when segments are out of order (so 3120 * fast retransmit can work). 3121 */ 3122 if (th->th_seq == tp->rcv_nxt && 3123 SEGQ_EMPTY(tp) && 3124 (TCPS_HAVEESTABLISHED(tp->t_state) || 3125 tfo_syn)) { 3126 if (DELAY_ACK(tp, tlen) || tfo_syn) 3127 tp->t_flags |= TF_DELACK; 3128 else 3129 tp->t_flags |= TF_ACKNOW; 3130 tp->rcv_nxt += tlen; 3131 if (tlen && 3132 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 3133 (tp->t_fbyte_in == 0)) { 3134 tp->t_fbyte_in = ticks; 3135 if (tp->t_fbyte_in == 0) 3136 tp->t_fbyte_in = 1; 3137 if (tp->t_fbyte_out && tp->t_fbyte_in) 3138 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 3139 } 3140 thflags = tcp_get_flags(th) & TH_FIN; 3141 TCPSTAT_INC(tcps_rcvpack); 3142 TCPSTAT_ADD(tcps_rcvbyte, tlen); 3143 SOCKBUF_LOCK(&so->so_rcv); 3144 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 3145 m_freem(m); 3146 else 3147 sbappendstream_locked(&so->so_rcv, m, 0); 3148 tp->t_flags |= TF_WAKESOR; 3149 } else { 3150 /* 3151 * XXX: Due to the header drop above "th" is 3152 * theoretically invalid by now. Fortunately 3153 * m_adj() doesn't actually frees any mbufs 3154 * when trimming from the head. 3155 */ 3156 tcp_seq temp = save_start; 3157 3158 thflags = tcp_reass(tp, th, &temp, &tlen, m); 3159 tp->t_flags |= TF_ACKNOW; 3160 } 3161 if ((tp->t_flags & TF_SACK_PERMIT) && 3162 (save_tlen > 0) && 3163 TCPS_HAVEESTABLISHED(tp->t_state)) { 3164 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 3165 /* 3166 * DSACK actually handled in the fastpath 3167 * above. 3168 */ 3169 tcp_update_sack_list(tp, save_start, 3170 save_start + save_tlen); 3171 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 3172 if ((tp->rcv_numsacks >= 1) && 3173 (tp->sackblks[0].end == save_start)) { 3174 /* 3175 * Partial overlap, recorded at todrop 3176 * above. 3177 */ 3178 tcp_update_sack_list(tp, 3179 tp->sackblks[0].start, 3180 tp->sackblks[0].end); 3181 } else { 3182 tcp_update_dsack_list(tp, save_start, 3183 save_start + save_tlen); 3184 } 3185 } else if (tlen >= save_tlen) { 3186 /* Update of sackblks. */ 3187 tcp_update_dsack_list(tp, save_start, 3188 save_start + save_tlen); 3189 } else if (tlen > 0) { 3190 tcp_update_dsack_list(tp, save_start, 3191 save_start + tlen); 3192 } 3193 } 3194 tcp_handle_wakeup(tp); 3195 #if 0 3196 /* 3197 * Note the amount of data that peer has sent into 3198 * our window, in order to estimate the sender's 3199 * buffer size. 3200 * XXX: Unused. 
3201 */ 3202 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3203 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3204 else 3205 len = so->so_rcv.sb_hiwat; 3206 #endif 3207 } else { 3208 m_freem(m); 3209 thflags &= ~TH_FIN; 3210 } 3211 3212 /* 3213 * If FIN is received ACK the FIN and let the user know 3214 * that the connection is closing. 3215 */ 3216 if (thflags & TH_FIN) { 3217 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3218 /* The socket upcall is handled by socantrcvmore. */ 3219 socantrcvmore(so); 3220 /* 3221 * If connection is half-synchronized 3222 * (ie NEEDSYN flag on) then delay ACK, 3223 * so it may be piggybacked when SYN is sent. 3224 * Otherwise, since we received a FIN then no 3225 * more input can be expected, send ACK now. 3226 */ 3227 if (tp->t_flags & TF_NEEDSYN) 3228 tp->t_flags |= TF_DELACK; 3229 else 3230 tp->t_flags |= TF_ACKNOW; 3231 tp->rcv_nxt++; 3232 } 3233 switch (tp->t_state) { 3234 /* 3235 * In SYN_RECEIVED and ESTABLISHED STATES 3236 * enter the CLOSE_WAIT state. 3237 */ 3238 case TCPS_SYN_RECEIVED: 3239 tp->t_starttime = ticks; 3240 /* FALLTHROUGH */ 3241 case TCPS_ESTABLISHED: 3242 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3243 break; 3244 3245 /* 3246 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3247 * enter the CLOSING state. 3248 */ 3249 case TCPS_FIN_WAIT_1: 3250 tcp_state_change(tp, TCPS_CLOSING); 3251 break; 3252 3253 /* 3254 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3255 * starting the time-wait timer, turning off the other 3256 * standard timers. 3257 */ 3258 case TCPS_FIN_WAIT_2: 3259 tcp_twstart(tp); 3260 return; 3261 } 3262 } 3263 TCP_PROBE3(debug__input, tp, th, m); 3264 3265 /* 3266 * Return any desired output. 3267 */ 3268 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3269 (void) tcp_output(tp); 3270 3271 check_delack: 3272 INP_WLOCK_ASSERT(inp); 3273 3274 if (tp->t_flags & TF_DELACK) { 3275 tp->t_flags &= ~TF_DELACK; 3276 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3277 } 3278 INP_WUNLOCK(inp); 3279 return; 3280 3281 dropafterack: 3282 /* 3283 * Generate an ACK dropping incoming segment if it occupies 3284 * sequence space, where the ACK reflects our state. 3285 * 3286 * We can now skip the test for the RST flag since all 3287 * paths to this code happen after packets containing 3288 * RST have been dropped. 3289 * 3290 * In the SYN-RECEIVED state, don't send an ACK unless the 3291 * segment we received passes the SYN-RECEIVED ACK test. 3292 * If it fails send a RST. This breaks the loop in the 3293 * "LAND" DoS attack, and also prevents an ACK storm 3294 * between two listening ports that have been sent forged 3295 * SYN segments, each with the source address of the other. 3296 */ 3297 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3298 (SEQ_GT(tp->snd_una, th->th_ack) || 3299 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3300 rstreason = BANDLIM_RST_OPENPORT; 3301 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 3302 goto dropwithreset; 3303 } 3304 TCP_PROBE3(debug__input, tp, th, m); 3305 tp->t_flags |= TF_ACKNOW; 3306 (void) tcp_output(tp); 3307 INP_WUNLOCK(inp); 3308 m_freem(m); 3309 return; 3310 3311 dropwithreset: 3312 if (tp != NULL) { 3313 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3314 INP_WUNLOCK(inp); 3315 } else 3316 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3317 return; 3318 3319 drop: 3320 /* 3321 * Drop space held by incoming segment and return. 
3322 */ 3323 TCP_PROBE3(debug__input, tp, th, m); 3324 if (tp != NULL) { 3325 INP_WUNLOCK(inp); 3326 } 3327 m_freem(m); 3328 } 3329 3330 /* 3331 * Issue RST and make ACK acceptable to originator of segment. 3332 * The mbuf must still include the original packet header. 3333 * tp may be NULL. 3334 */ 3335 void 3336 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3337 int tlen, int rstreason) 3338 { 3339 #ifdef INET 3340 struct ip *ip; 3341 #endif 3342 #ifdef INET6 3343 struct ip6_hdr *ip6; 3344 #endif 3345 3346 if (tp != NULL) { 3347 INP_LOCK_ASSERT(tptoinpcb(tp)); 3348 } 3349 3350 /* Don't bother if destination was broadcast/multicast. */ 3351 if ((tcp_get_flags(th) & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3352 goto drop; 3353 #ifdef INET6 3354 if (mtod(m, struct ip *)->ip_v == 6) { 3355 ip6 = mtod(m, struct ip6_hdr *); 3356 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3357 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3358 goto drop; 3359 /* IPv6 anycast check is done at tcp6_input() */ 3360 } 3361 #endif 3362 #if defined(INET) && defined(INET6) 3363 else 3364 #endif 3365 #ifdef INET 3366 { 3367 ip = mtod(m, struct ip *); 3368 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3369 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3370 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3371 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3372 goto drop; 3373 } 3374 #endif 3375 3376 /* Perform bandwidth limiting. */ 3377 if (badport_bandlim(rstreason) < 0) 3378 goto drop; 3379 3380 /* tcp_respond consumes the mbuf chain. */ 3381 if (tcp_get_flags(th) & TH_ACK) { 3382 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3383 th->th_ack, TH_RST); 3384 } else { 3385 if (tcp_get_flags(th) & TH_SYN) 3386 tlen++; 3387 if (tcp_get_flags(th) & TH_FIN) 3388 tlen++; 3389 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3390 (tcp_seq)0, TH_RST|TH_ACK); 3391 } 3392 return; 3393 drop: 3394 m_freem(m); 3395 } 3396 3397 /* 3398 * Parse TCP options and place in tcpopt. 3399 */ 3400 void 3401 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3402 { 3403 int opt, optlen; 3404 3405 to->to_flags = 0; 3406 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3407 opt = cp[0]; 3408 if (opt == TCPOPT_EOL) 3409 break; 3410 if (opt == TCPOPT_NOP) 3411 optlen = 1; 3412 else { 3413 if (cnt < 2) 3414 break; 3415 optlen = cp[1]; 3416 if (optlen < 2 || optlen > cnt) 3417 break; 3418 } 3419 switch (opt) { 3420 case TCPOPT_MAXSEG: 3421 if (optlen != TCPOLEN_MAXSEG) 3422 continue; 3423 if (!(flags & TO_SYN)) 3424 continue; 3425 to->to_flags |= TOF_MSS; 3426 bcopy((char *)cp + 2, 3427 (char *)&to->to_mss, sizeof(to->to_mss)); 3428 to->to_mss = ntohs(to->to_mss); 3429 break; 3430 case TCPOPT_WINDOW: 3431 if (optlen != TCPOLEN_WINDOW) 3432 continue; 3433 if (!(flags & TO_SYN)) 3434 continue; 3435 to->to_flags |= TOF_SCALE; 3436 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3437 break; 3438 case TCPOPT_TIMESTAMP: 3439 if (optlen != TCPOLEN_TIMESTAMP) 3440 continue; 3441 to->to_flags |= TOF_TS; 3442 bcopy((char *)cp + 2, 3443 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3444 to->to_tsval = ntohl(to->to_tsval); 3445 bcopy((char *)cp + 6, 3446 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3447 to->to_tsecr = ntohl(to->to_tsecr); 3448 break; 3449 case TCPOPT_SIGNATURE: 3450 /* 3451 * In order to reply to a host which has set the 3452 * TCP_SIGNATURE option in its initial SYN, we have 3453 * to record the fact that the option was observed 3454 * here for the syncache code to perform the correct 3455 * response. 
3456 */ 3457 if (optlen != TCPOLEN_SIGNATURE) 3458 continue; 3459 to->to_flags |= TOF_SIGNATURE; 3460 to->to_signature = cp + 2; 3461 break; 3462 case TCPOPT_SACK_PERMITTED: 3463 if (optlen != TCPOLEN_SACK_PERMITTED) 3464 continue; 3465 if (!(flags & TO_SYN)) 3466 continue; 3467 if (!V_tcp_do_sack) 3468 continue; 3469 to->to_flags |= TOF_SACKPERM; 3470 break; 3471 case TCPOPT_SACK: 3472 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3473 continue; 3474 if (flags & TO_SYN) 3475 continue; 3476 to->to_flags |= TOF_SACK; 3477 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3478 to->to_sacks = cp + 2; 3479 TCPSTAT_INC(tcps_sack_rcv_blocks); 3480 break; 3481 case TCPOPT_FAST_OPEN: 3482 /* 3483 * Cookie length validation is performed by the 3484 * server side cookie checking code or the client 3485 * side cookie cache update code. 3486 */ 3487 if (!(flags & TO_SYN)) 3488 continue; 3489 if (!V_tcp_fastopen_client_enable && 3490 !V_tcp_fastopen_server_enable) 3491 continue; 3492 to->to_flags |= TOF_FASTOPEN; 3493 to->to_tfo_len = optlen - 2; 3494 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3495 break; 3496 default: 3497 continue; 3498 } 3499 } 3500 } 3501 3502 /* 3503 * Pull out of band byte out of a segment so 3504 * it doesn't appear in the user's data queue. 3505 * It is still reflected in the segment length for 3506 * sequencing purposes. 3507 */ 3508 void 3509 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3510 int off) 3511 { 3512 int cnt = off + th->th_urp - 1; 3513 3514 while (cnt >= 0) { 3515 if (m->m_len > cnt) { 3516 char *cp = mtod(m, caddr_t) + cnt; 3517 struct tcpcb *tp = sototcpcb(so); 3518 3519 INP_WLOCK_ASSERT(tptoinpcb(tp)); 3520 3521 tp->t_iobc = *cp; 3522 tp->t_oobflags |= TCPOOB_HAVEDATA; 3523 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3524 m->m_len--; 3525 if (m->m_flags & M_PKTHDR) 3526 m->m_pkthdr.len--; 3527 return; 3528 } 3529 cnt -= m->m_len; 3530 m = m->m_next; 3531 if (m == NULL) 3532 break; 3533 } 3534 panic("tcp_pulloutofband"); 3535 } 3536 3537 /* 3538 * Collect new round-trip time estimate 3539 * and update averages and current timeout. 3540 */ 3541 void 3542 tcp_xmit_timer(struct tcpcb *tp, int rtt) 3543 { 3544 int delta; 3545 3546 INP_WLOCK_ASSERT(tptoinpcb(tp)); 3547 3548 TCPSTAT_INC(tcps_rttupdated); 3549 if (tp->t_rttupdated < UCHAR_MAX) 3550 tp->t_rttupdated++; 3551 #ifdef STATS 3552 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, 3553 imax(0, rtt * 1000 / hz)); 3554 #endif 3555 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) { 3556 /* 3557 * srtt is stored as fixed point with 5 bits after the 3558 * binary point (i.e., scaled by 8). The following magic 3559 * is equivalent to the smoothing algorithm in rfc793 with 3560 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3561 * point). Adjust rtt to origin 0. 3562 */ 3563 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3564 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3565 3566 if ((tp->t_srtt += delta) <= 0) 3567 tp->t_srtt = 1; 3568 3569 /* 3570 * We accumulate a smoothed rtt variance (actually, a 3571 * smoothed mean difference), then set the retransmit 3572 * timer to smoothed rtt + 4 times the smoothed variance. 3573 * rttvar is stored as fixed point with 4 bits after the 3574 * binary point (scaled by 16). The following is 3575 * equivalent to rfc793 smoothing with an alpha of .75 3576 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3577 * rfc793's wired-in beta. 
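 * Both estimators are maintained purely with integer shifts
 * and adds on the pre-scaled values; together they feed
 * TCP_REXMTVAL() further down, which works out to roughly
 * srtt + 4 * rttvar for the retransmit timeout.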
3578 */ 3579 if (delta < 0) 3580 delta = -delta; 3581 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3582 if ((tp->t_rttvar += delta) <= 0) 3583 tp->t_rttvar = 1; 3584 } else { 3585 /* 3586 * No rtt measurement yet - use the unsmoothed rtt. 3587 * Set the variance to half the rtt (so our first 3588 * retransmit happens at 3*rtt). 3589 */ 3590 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3591 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3592 } 3593 tp->t_rtttime = 0; 3594 tp->t_rxtshift = 0; 3595 3596 /* 3597 * the retransmit should happen at rtt + 4 * rttvar. 3598 * Because of the way we do the smoothing, srtt and rttvar 3599 * will each average +1/2 tick of bias. When we compute 3600 * the retransmit timer, we want 1/2 tick of rounding and 3601 * 1 extra tick because of +-1/2 tick uncertainty in the 3602 * firing of the timer. The bias will give us exactly the 3603 * 1.5 tick we need. But, because the bias is 3604 * statistical, we have to test that we don't drop below 3605 * the minimum feasible timer (which is 2 ticks). 3606 */ 3607 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3608 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3609 3610 /* 3611 * We received an ack for a packet that wasn't retransmitted; 3612 * it is probably safe to discard any error indications we've 3613 * received recently. This isn't quite right, but close enough 3614 * for now (a route might have failed after we sent a segment, 3615 * and the return path might not be symmetrical). 3616 */ 3617 tp->t_softerror = 0; 3618 } 3619 3620 /* 3621 * Determine a reasonable value for maxseg size. 3622 * If the route is known, check route for mtu. 3623 * If none, use an mss that can be handled on the outgoing interface 3624 * without forcing IP to fragment. If no route is found, route has no mtu, 3625 * or the destination isn't local, use a default, hopefully conservative 3626 * size (usually 512 or the default IP max size, but no more than the mtu 3627 * of the interface), as we can't discover anything about intervening 3628 * gateways or networks. We also initialize the congestion/slow start 3629 * window to be a single segment if the destination isn't local. 3630 * While looking at the routing entry, we also initialize other path-dependent 3631 * parameters from pre-set or cached values in the routing entry. 3632 * 3633 * NOTE that resulting t_maxseg doesn't include space for TCP options or 3634 * IP options, e.g. IPSEC data, since length of this data may vary, and 3635 * thus it is calculated for every segment separately in tcp_output(). 3636 * 3637 * NOTE that this routine is only called when we process an incoming 3638 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3639 * settings are handled in tcp_mssopt(). 3640 */ 3641 void 3642 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3643 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3644 { 3645 int mss = 0; 3646 uint32_t maxmtu = 0; 3647 struct inpcb *inp = tptoinpcb(tp); 3648 struct hc_metrics_lite metrics; 3649 #ifdef INET6 3650 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3651 size_t min_protoh = isipv6 ? 
3652 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3653 sizeof (struct tcpiphdr); 3654 #else 3655 size_t min_protoh = sizeof(struct tcpiphdr); 3656 #endif 3657 3658 INP_WLOCK_ASSERT(inp); 3659 3660 if (tp->t_port) 3661 min_protoh += V_tcp_udp_tunneling_overhead; 3662 if (mtuoffer != -1) { 3663 KASSERT(offer == -1, ("%s: conflict", __func__)); 3664 offer = mtuoffer - min_protoh; 3665 } 3666 3667 /* Initialize. */ 3668 #ifdef INET6 3669 if (isipv6) { 3670 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3671 tp->t_maxseg = V_tcp_v6mssdflt; 3672 } 3673 #endif 3674 #if defined(INET) && defined(INET6) 3675 else 3676 #endif 3677 #ifdef INET 3678 { 3679 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3680 tp->t_maxseg = V_tcp_mssdflt; 3681 } 3682 #endif 3683 3684 /* 3685 * No route to sender, stay with default mss and return. 3686 */ 3687 if (maxmtu == 0) { 3688 /* 3689 * In case we return early we need to initialize metrics 3690 * to a defined state as tcp_hc_get() would do for us 3691 * if there was no cache hit. 3692 */ 3693 if (metricptr != NULL) 3694 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3695 return; 3696 } 3697 3698 /* What have we got? */ 3699 switch (offer) { 3700 case 0: 3701 /* 3702 * Offer == 0 means that there was no MSS on the SYN 3703 * segment, in this case we use tcp_mssdflt as 3704 * already assigned to t_maxseg above. 3705 */ 3706 offer = tp->t_maxseg; 3707 break; 3708 3709 case -1: 3710 /* 3711 * Offer == -1 means that we didn't receive SYN yet. 3712 */ 3713 /* FALLTHROUGH */ 3714 3715 default: 3716 /* 3717 * Prevent DoS attack with too small MSS. Round up 3718 * to at least minmss. 3719 */ 3720 offer = max(offer, V_tcp_minmss); 3721 } 3722 3723 /* 3724 * rmx information is now retrieved from tcp_hostcache. 3725 */ 3726 tcp_hc_get(&inp->inp_inc, &metrics); 3727 if (metricptr != NULL) 3728 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3729 3730 /* 3731 * If there's a discovered mtu in tcp hostcache, use it. 3732 * Else, use the link mtu. 3733 */ 3734 if (metrics.rmx_mtu) 3735 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3736 else { 3737 #ifdef INET6 3738 if (isipv6) { 3739 mss = maxmtu - min_protoh; 3740 if (!V_path_mtu_discovery && 3741 !in6_localaddr(&inp->in6p_faddr)) 3742 mss = min(mss, V_tcp_v6mssdflt); 3743 } 3744 #endif 3745 #if defined(INET) && defined(INET6) 3746 else 3747 #endif 3748 #ifdef INET 3749 { 3750 mss = maxmtu - min_protoh; 3751 if (!V_path_mtu_discovery && 3752 !in_localaddr(inp->inp_faddr)) 3753 mss = min(mss, V_tcp_mssdflt); 3754 } 3755 #endif 3756 /* 3757 * XXX - The above conditional (mss = maxmtu - min_protoh) 3758 * probably violates the TCP spec. 3759 * The problem is that, since we don't know the 3760 * other end's MSS, we are supposed to use a conservative 3761 * default. But, if we do that, then MTU discovery will 3762 * never actually take place, because the conservative 3763 * default is much less than the MTUs typically seen 3764 * on the Internet today. For the moment, we'll sweep 3765 * this under the carpet. 3766 * 3767 * The conservative default might not actually be a problem 3768 * if the only case this occurs is when sending an initial 3769 * SYN with options and data to a host we've never talked 3770 * to before. Then, they will reply with an MSS value which 3771 * will get recorded and the new parameters should get 3772 * recomputed. For Further Study. 
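 * Whatever value is chosen above is further capped by the
 * peer's offered MSS and floored at 64 bytes just below, so a
 * missing or bogus offer cannot drive t_maxseg to an unusable
 * value.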
void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	uint32_t bufsize;
	struct inpcb *inp = tptoinpcb(tp);
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat is different from the default (then
	 * it has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(so, SO_SND, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(so, SO_RCV, bufsize, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}
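/*
 * Worked example for the buffer scaling above (illustrative numbers):
 * with mss = 1460 and a 32768-byte send buffer, roundup(32768, 1460) =
 * 33580, so the buffer is grown to an integral 23 segments, capped at
 * sb_max.  A buffer smaller than one mss shrinks the mss instead.
 */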
/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	uint32_t thcmtu = 0;
	uint32_t maxmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}

void
tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0;
	int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	/*
	 * Compute the amount of data that this ACK is indicating
	 * (del_data) and an estimate of how many bytes are in the
	 * network.
	 */
	if (tcp_is_sack_recovery(tp, to) ||
	    (IN_CONGRECOVERY(tp->t_flags) &&
	     !IN_FASTRECOVERY(tp->t_flags))) {
		del_data = tp->sackhint.delivered_data;
		if (V_tcp_do_newsack)
			pipe = tcp_compute_pipe(tp);
		else
			pipe = (tp->snd_nxt - tp->snd_fack) +
			    tp->sackhint.sack_bytes_rexmit;
	} else {
		if (tp->sackhint.prr_delivered < (tcprexmtthresh * maxseg +
		    tp->snd_recover - tp->snd_una))
			del_data = maxseg;
		pipe = imax(0, tp->snd_max - tp->snd_una -
		    imin(INT_MAX / 65536, tp->t_dupacks) * maxseg);
	}
	tp->sackhint.prr_delivered += del_data;
	/*
	 * Proportional Rate Reduction
	 */
	if (pipe >= tp->snd_ssthresh) {
		if (tp->sackhint.recover_fs == 0)
			tp->sackhint.recover_fs =
			    imax(1, tp->snd_nxt - tp->snd_una);
		snd_cnt = howmany((long)tp->sackhint.prr_delivered *
		    tp->snd_ssthresh, tp->sackhint.recover_fs) -
		    tp->sackhint.prr_out;
	} else {
		if (V_tcp_do_prr_conservative || (del_data == 0))
			limit = tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out;
		else
			limit = imax(tp->sackhint.prr_delivered -
			    tp->sackhint.prr_out, del_data) +
			    maxseg;
		snd_cnt = imin((tp->snd_ssthresh - pipe), limit);
	}
	snd_cnt = imax(snd_cnt, 0) / maxseg;
	/*
	 * Send snd_cnt new data into the network in response to this ack.
	 * If there is going to be a SACK retransmission, adjust snd_cwnd
	 * accordingly.
	 */
	if (IN_FASTRECOVERY(tp->t_flags)) {
		if (tcp_is_sack_recovery(tp, to)) {
			tp->snd_cwnd = tp->snd_nxt - tp->snd_recover +
			    tp->sackhint.sack_bytes_rexmit +
			    (snd_cnt * maxseg);
		} else {
			tp->snd_cwnd = (tp->snd_max - tp->snd_una) +
			    (snd_cnt * maxseg);
		}
	} else if (IN_CONGRECOVERY(tp->t_flags))
		tp->snd_cwnd = pipe - del_data + (snd_cnt * maxseg);
	tp->snd_cwnd = imax(maxseg, tp->snd_cwnd);
}
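/*
 * Worked example for the PRR computation above (illustrative numbers,
 * RFC 6937): with maxseg = 1000, snd_ssthresh = 10000, recover_fs = 20000
 * at the start of recovery, prr_delivered = 10000 and prr_out = 4000, the
 * pipe >= ssthresh branch gives snd_cnt = howmany(10000 * 10000, 20000) -
 * 4000 = 1000 bytes, i.e. one segment, so transmissions are paced at
 * roughly ssthresh/recover_fs (here one half) of the delivery rate.
 */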
/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer to
 * be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	uint32_t ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on the fact that tp->snd_una
	 * has not been updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}

int
tcp_compute_pipe(struct tcpcb *tp)
{
	if (tp->t_fb->tfb_compute_pipe == NULL) {
		return (tp->snd_max - tp->snd_una +
		    tp->sackhint.sack_bytes_rexmit -
		    tp->sackhint.sacked_bytes);
	} else {
		return ((*tp->t_fb->tfb_compute_pipe)(tp));
	}
}

uint32_t
tcp_compute_initwnd(uint32_t maxseg)
{
	/*
	 * Calculate the Initial Window, also used as Restart Window.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size is also
	 * supported.
	 */
	if (V_tcp_initcwnd_segments)
		return min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		return min(4 * maxseg, max(2 * maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			return (2 * maxseg);
		else if (maxseg > 1095)
			return (3 * maxseg);
		else
			return (4 * maxseg);
	}
}
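/*
 * Worked example for tcp_compute_initwnd() (illustrative): with
 * maxseg = 1460 and ten initial-window segments configured, the result is
 * min(10 * 1460, max(2 * 1460, 10 * 1460)) = 14600 bytes; under the
 * RFC 3390 rule it is min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes; the
 * plain RFC 5681 table yields 3 * 1460 = 4380 bytes for this segment size.
 */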