/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc6675_pipe), 0,
    "Use calculated pipe/in-flight bytes per RFC 6675");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

/*
 * TCP statistics are stored in an array of counter(9)s, whose size matches
 * the size of struct tcpstat.  The TCP running connection count is a
 * regular array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

static void
tcp_vnet_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_init, NULL);

#ifdef VIMAGE
static void
tcp_vnet_uninit(const void *unused)
{

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);
}
VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_uninit, NULL);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
    uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->nsegs = nsegs;
	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    nsegs * V_tcp_abc_l_var * tcp_maxseg(tp));
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}

void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else
		tp->snd_cwnd = tcp_compute_initwnd(maxseg);

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	u_int maxseg;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		maxseg = tcp_maxseg(tp);
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    maxseg) * maxseg;
		tp->snd_cwnd = maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxseg) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

static void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT1:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		if (tp->t_flags & TF_DELACK)
			tp->ccv->flags |= CCF_DELACK;
		else
			tp->ccv->flags &= ~CCF_DELACK;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW)
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
	}
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
	struct m_tag *fwd_tag = NULL;
	struct epoch_tracker et;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */

		if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
			m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
			if (m == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		iptos = ip->ip_tos;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(len);
			/* Reset TOS bits */
			ip->ip_tos = iptos;
			/* Re-initialization for later version check */
			ip->ip_v = IPVERSION;
			ip->ip_hl = off0 >> 2;
		}

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
	}
#endif /* INET */

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are three cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, ACKs for a
	 * connection in TIMEWAIT and SYNs not targeting a listening socket.
	 */
	if ((thflags & (TH_FIN | TH_RST)) != 0) {
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		ti_locked = TI_RLOCKED;
	} else
		ti_locked = TI_UNLOCKED;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
	}
#endif
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	/*
	 * While waiting for inp lock during the lookup, another thread
	 * can have dropped the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 */
	if (inp->inp_flags & INP_DROPPED) {
		INP_WUNLOCK(inp);
		inp = NULL;
		goto findpcb;
	}
	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    ((inp->inp_socket == NULL) ||
	    (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6) &&
	    IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
		goto dropunlock;
	}
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4) &&
	    IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
		goto dropunlock;
	}
#endif /* INET */
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6) {
			if (inp->inp_ip_minttl > ip6->ip6_hlim)
				goto dropunlock;
		} else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			INP_INFO_RLOCK_ET(&V_tcbinfo, et);
			ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		return (IPPROTO_DONE);
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_FIN | TH_RST)) != 0)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
#endif
	if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
	      (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) &&
	       !IS_FASTOPEN(tp->t_flags)))) {
		if (ti_locked == TI_UNLOCKED) {
			INP_INFO_RLOCK_ET(&V_tcbinfo, et);
			ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	KASSERT(tp->t_state == TCPS_LISTEN || !(so->so_options & SO_ACCEPTCONN),
	    ("%s: so accepting but tp %p not listening", __func__, tp));
	if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN)) {
		struct in_conninfo inc;

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			if (inp->inp_inc.inc_flags & INC_IPV6MINMTU)
				inc.inc_flags |= INC_IPV6MINMTU;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {

			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			rstreason = syncache_expand(&inc, &to, th, &so, m);
			if (rstreason < 0) {
				/*
				 * A failing TCP MD5 signature comparison
				 * must result in the segment being dropped
				 * and must not produce any response back
				 * to the sender.
				 */
				goto dropunlock;
			} else if (rstreason == 0) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
tfo_socket_result:
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			TCP_PROBE5(receive, NULL, tp, m, tp, th);
			tp->t_fb->tfb_tcp_do_segment(m, th, so, tp,
			    drop_hdrlen, tlen, iptos);
			if (ti_locked == TI_RLOCKED)
				INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
			return (IPPROTO_DONE);
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th, m);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple description text for deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		TCP_PROBE3(debug__input, tp, th, m);
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL))
			goto tfo_socket_result;

		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		if (ti_locked == TI_RLOCKED) {
			INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
			ti_locked = TI_UNLOCKED;
		}
		INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition allowing a SYN
		 * attempt go through unhandled.
		 */
		goto dropunlock;
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (tp->t_flags & TF_SIGNATURE) {
		tcp_dooptions(&to, optp, optlen, thflags);
		if ((to.to_flags & TOF_SIGNATURE) == 0) {
			TCPSTAT_INC(tcps_sig_err_nosigopt);
			goto dropunlock;
		}
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_INPUT(m, th, to.to_signature) != 0)
			goto dropunlock;
	}
#endif
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
	if (ti_locked == TI_RLOCKED)
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, m, tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * Automatic sizing of receive socket buffer.  Often the send
 * buffer size is not optimally adjusted to the actual network
 * conditions at hand (delay bandwidth product).  Setting the
 * buffer size too small limits throughput on links with high
 * bandwidth and high delay (eg. trans-continental/oceanic links).
 *
 * On the receive side the socket buffer memory is only rarely
 * used to any significant extent.  This allows us to be much
 * more aggressive in scaling the receive socket buffer.  For
 * the case that the buffer space is actually used to a large
 * extent and we run out of kernel memory we can simply drop
 * the new segments; TCP on the sender will just retransmit it
 * later.  Setting the buffer size too big may only consume too
 * much kernel memory if the application doesn't read() from
 * the socket or packet loss or reordering makes use of the
 * reassembly queue.
 *
 * The criteria to step up the receive buffer one notch are:
 *  1. Application has not set receive buffer size with
 *     SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
 *  2. the number of bytes received during the time it takes
 *     one timestamp to be reflected back to us (the RTT);
 *  3. received bytes per RTT is within seven eighth of the
 *     current socket buffer size;
 *  4. receive buffer size has not hit maximal automatic size;
 *
 * This algorithm does one step per RTT at most and only if
 * we receive a bulk stream w/o packet losses or reorderings.
 * Shrinking the buffer during idle times is not necessary as
 * it doesn't consume any memory when idle.
 *
 * TODO: Only step up if the application is actually serving
 * the buffer to better manage the socket buffer resources.
 */
int
tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int tlen)
{
	int newsize = 0;

	if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) &&
	    tp->t_srtt != 0 && tp->rfbuf_ts != 0 &&
	    TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) >
	    (tp->t_srtt >> TCP_RTT_SHIFT)) {
		if (tp->rfbuf_cnt > (so->so_rcv.sb_hiwat / 8 * 7) &&
		    so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) {
			newsize = min(so->so_rcv.sb_hiwat +
			    V_tcp_autorcvbuf_inc, V_tcp_autorcvbuf_max);
		}
		TCP_PROBE6(receive__autoresize, NULL, tp, m, tp, th, newsize);

		/* Start over with next RTT. */
		tp->rfbuf_ts = 0;
		tp->rfbuf_cnt = 0;
	} else {
		tp->rfbuf_cnt += tlen;	/* add up */
	}

	return (newsize);
}

void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos)
{
	int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win;
	uint32_t tiwin;
	uint16_t nsegs;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;
	nsegs = max(1, m->m_pkthdr.lro_nsegs);
	/*
	 * If this is either a state-changing packet or current state isn't
	 * established, we require a write lock on tcbinfo.  Otherwise, we
	 * allow the tcbinfo to be either locked or unlocked, as the
	 * caller may have unnecessarily acquired a write lock due to a race.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif
	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
	    tlen, NULL, true);

	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: "
			    "SYN|FIN segment ignored (based on "
			    "sysctl setting)\n", s, __func__);
			free(s, M_TCPLOG);
		}
		goto drop;
	}

	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first.
	 */
	if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
	    (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if ((tp->t_flags & TF_SIGNATURE) != 0 &&
	    (to.to_flags & TOF_SIGNATURE) == 0) {
		TCPSTAT_INC(tcps_sig_err_sigopt);
		/* XXX: should drop? */
	}
#endif
	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
		else if (tp->t_flags & TF_PREVVALID &&
		    tp->t_badrxtwin != 0 && SEQ_LT(to.to_tsecr, tp->t_badrxtwin))
			cc_cong_signal(tp, th, CC_RTO_ERR);
	}
	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
		if (IS_FASTOPEN(tp->t_flags)) {
			if (to.to_flags & TOF_FASTOPEN) {
				uint16_t mss;

				if (to.to_flags & TOF_MSS)
					mss = to.to_mss;
				else
					if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
						mss = TCP6_MSS;
					else
						mss = TCP_MSS;
				tcp_fastopen_update_cache(tp, mss,
				    to.to_tfo_len, to.to_tfo_cookie);
			} else
				tcp_fastopen_disable_path(tp);
		}
	}

	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    SEGQ_EMPTY(tp) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery without timestamps.
				 */
				if ((to.to_flags & TOF_TS) == 0 &&
				    tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					uint32_t t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				acked = BYTES_THIS_ACK(tp, th);

#ifdef TCP_HHOOK
				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);
#endif

				TCPSTAT_ADD(tcps_rcvackpack, nsegs);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, nsegs, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				TCP_PROBE3(debug__input, tp, th, m);
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (sbavail(&so->so_snd))
					(void) tp->t_fb->tfb_tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
1863 */ 1864 tp->rcv_up = tp->rcv_nxt; 1865 TCPSTAT_ADD(tcps_rcvpack, nsegs); 1866 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1867 #ifdef TCPDEBUG 1868 if (so->so_options & SO_DEBUG) 1869 tcp_trace(TA_INPUT, ostate, tp, 1870 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1871 #endif 1872 TCP_PROBE3(debug__input, tp, th, m); 1873 1874 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 1875 1876 /* Add data to socket buffer. */ 1877 SOCKBUF_LOCK(&so->so_rcv); 1878 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1879 m_freem(m); 1880 } else { 1881 /* 1882 * Set new socket buffer size. 1883 * Give up when limit is reached. 1884 */ 1885 if (newsize) 1886 if (!sbreserve_locked(&so->so_rcv, 1887 newsize, so, NULL)) 1888 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1889 m_adj(m, drop_hdrlen); /* delayed header drop */ 1890 sbappendstream_locked(&so->so_rcv, m, 0); 1891 } 1892 /* NB: sorwakeup_locked() does an implicit unlock. */ 1893 sorwakeup_locked(so); 1894 if (DELAY_ACK(tp, tlen)) { 1895 tp->t_flags |= TF_DELACK; 1896 } else { 1897 tp->t_flags |= TF_ACKNOW; 1898 tp->t_fb->tfb_tcp_output(tp); 1899 } 1900 goto check_delack; 1901 } 1902 } 1903 1904 /* 1905 * Calculate amount of space in receive window, 1906 * and then do TCP input processing. 1907 * Receive window is amount of space in rcv queue, 1908 * but not less than advertised window. 1909 */ 1910 win = sbspace(&so->so_rcv); 1911 if (win < 0) 1912 win = 0; 1913 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1914 1915 switch (tp->t_state) { 1916 1917 /* 1918 * If the state is SYN_RECEIVED: 1919 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1920 */ 1921 case TCPS_SYN_RECEIVED: 1922 if ((thflags & TH_ACK) && 1923 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1924 SEQ_GT(th->th_ack, tp->snd_max))) { 1925 rstreason = BANDLIM_RST_OPENPORT; 1926 goto dropwithreset; 1927 } 1928 if (IS_FASTOPEN(tp->t_flags)) { 1929 /* 1930 * When a TFO connection is in SYN_RECEIVED, the 1931 * only valid packets are the initial SYN, a 1932 * retransmit/copy of the initial SYN (possibly with 1933 * a subset of the original data), a valid ACK, a 1934 * FIN, or a RST. 1935 */ 1936 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1937 rstreason = BANDLIM_RST_OPENPORT; 1938 goto dropwithreset; 1939 } else if (thflags & TH_SYN) { 1940 /* non-initial SYN is ignored */ 1941 if ((tcp_timer_active(tp, TT_DELACK) || 1942 tcp_timer_active(tp, TT_REXMT))) 1943 goto drop; 1944 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1945 goto drop; 1946 } 1947 } 1948 break; 1949 1950 /* 1951 * If the state is SYN_SENT: 1952 * if seg contains a RST with valid ACK (SEQ.ACK has already 1953 * been verified), then drop the connection. 1954 * if seg contains a RST without an ACK, drop the seg. 1955 * if seg does not contain SYN, then drop the seg. 1956 * Otherwise this is an acceptable SYN segment 1957 * initialize tp->rcv_nxt and tp->irs 1958 * if seg contains ack then advance tp->snd_una 1959 * if seg contains an ECE and ECN support is enabled, the stream 1960 * is ECN capable. 
1961 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1962 * arrange for segment to be acked (eventually) 1963 * continue processing rest of data/controls, beginning with URG 1964 */ 1965 case TCPS_SYN_SENT: 1966 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1967 TCP_PROBE5(connect__refused, NULL, tp, 1968 m, tp, th); 1969 tp = tcp_drop(tp, ECONNREFUSED); 1970 } 1971 if (thflags & TH_RST) 1972 goto drop; 1973 if (!(thflags & TH_SYN)) 1974 goto drop; 1975 1976 tp->irs = th->th_seq; 1977 tcp_rcvseqinit(tp); 1978 if (thflags & TH_ACK) { 1979 int tfo_partial_ack = 0; 1980 1981 TCPSTAT_INC(tcps_connects); 1982 soisconnected(so); 1983 #ifdef MAC 1984 mac_socketpeer_set_from_mbuf(m, so); 1985 #endif 1986 /* Do window scaling on this connection? */ 1987 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1988 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1989 tp->rcv_scale = tp->request_r_scale; 1990 } 1991 tp->rcv_adv += min(tp->rcv_wnd, 1992 TCP_MAXWIN << tp->rcv_scale); 1993 tp->snd_una++; /* SYN is acked */ 1994 /* 1995 * If not all the data that was sent in the TFO SYN 1996 * has been acked, resend the remainder right away. 1997 */ 1998 if (IS_FASTOPEN(tp->t_flags) && 1999 (tp->snd_una != tp->snd_max)) { 2000 tp->snd_nxt = th->th_ack; 2001 tfo_partial_ack = 1; 2002 } 2003 /* 2004 * If there's data, delay ACK; if there's also a FIN 2005 * ACKNOW will be turned on later. 2006 */ 2007 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial_ack) 2008 tcp_timer_activate(tp, TT_DELACK, 2009 tcp_delacktime); 2010 else 2011 tp->t_flags |= TF_ACKNOW; 2012 2013 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 2014 V_tcp_do_ecn) { 2015 tp->t_flags |= TF_ECN_PERMIT; 2016 TCPSTAT_INC(tcps_ecn_shs); 2017 } 2018 2019 /* 2020 * Received <SYN,ACK> in SYN_SENT[*] state. 2021 * Transitions: 2022 * SYN_SENT --> ESTABLISHED 2023 * SYN_SENT* --> FIN_WAIT_1 2024 */ 2025 tp->t_starttime = ticks; 2026 if (tp->t_flags & TF_NEEDFIN) { 2027 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2028 tp->t_flags &= ~TF_NEEDFIN; 2029 thflags &= ~TH_SYN; 2030 } else { 2031 tcp_state_change(tp, TCPS_ESTABLISHED); 2032 TCP_PROBE5(connect__established, NULL, tp, 2033 m, tp, th); 2034 cc_conn_init(tp); 2035 tcp_timer_activate(tp, TT_KEEP, 2036 TP_KEEPIDLE(tp)); 2037 } 2038 } else { 2039 /* 2040 * Received initial SYN in SYN-SENT[*] state => 2041 * simultaneous open. 2042 * If it succeeds, connection is * half-synchronized. 2043 * Otherwise, do 3-way handshake: 2044 * SYN-SENT -> SYN-RECEIVED 2045 * SYN-SENT* -> SYN-RECEIVED* 2046 */ 2047 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 2048 tcp_timer_activate(tp, TT_REXMT, 0); 2049 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2050 } 2051 2052 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2053 INP_WLOCK_ASSERT(tp->t_inpcb); 2054 2055 /* 2056 * Advance th->th_seq to correspond to first data byte. 2057 * If data, trim to stay within window, 2058 * dropping FIN if necessary. 2059 */ 2060 th->th_seq++; 2061 if (tlen > tp->rcv_wnd) { 2062 todrop = tlen - tp->rcv_wnd; 2063 m_adj(m, -todrop); 2064 tlen = tp->rcv_wnd; 2065 thflags &= ~TH_FIN; 2066 TCPSTAT_INC(tcps_rcvpackafterwin); 2067 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2068 } 2069 tp->snd_wl1 = th->th_seq - 1; 2070 tp->rcv_up = th->th_seq; 2071 /* 2072 * Client side of transaction: already sent SYN and data. 2073 * If the remote host used T/TCP to validate the SYN, 2074 * our data will be ACK'd; if so, enter normal data segment 2075 * processing in the middle of step 5, ack processing. 2076 * Otherwise, goto step 6. 
2077 */ 2078 if (thflags & TH_ACK) 2079 goto process_ACK; 2080 2081 goto step6; 2082 2083 /* 2084 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2085 * do normal processing. 2086 * 2087 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2088 */ 2089 case TCPS_LAST_ACK: 2090 case TCPS_CLOSING: 2091 break; /* continue normal processing */ 2092 } 2093 2094 /* 2095 * States other than LISTEN or SYN_SENT. 2096 * First check the RST flag and sequence number since reset segments 2097 * are exempt from the timestamp and connection count tests. This 2098 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2099 * below which allowed reset segments in half the sequence space 2100 * to fall though and be processed (which gives forged reset 2101 * segments with a random sequence number a 50 percent chance of 2102 * killing a connection). 2103 * Then check timestamp, if present. 2104 * Then check the connection count, if present. 2105 * Then check that at least some bytes of segment are within 2106 * receive window. If segment begins before rcv_nxt, 2107 * drop leading data (and SYN); if nothing left, just ack. 2108 */ 2109 if (thflags & TH_RST) { 2110 /* 2111 * RFC5961 Section 3.2 2112 * 2113 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2114 * - If RST is in window, we send challenge ACK. 2115 * 2116 * Note: to take into account delayed ACKs, we should 2117 * test against last_ack_sent instead of rcv_nxt. 2118 * Note 2: we handle special case of closed window, not 2119 * covered by the RFC. 2120 */ 2121 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2122 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2123 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2124 2125 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2126 KASSERT(tp->t_state != TCPS_SYN_SENT, 2127 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2128 __func__, th, tp)); 2129 2130 if (V_tcp_insecure_rst || 2131 tp->last_ack_sent == th->th_seq) { 2132 TCPSTAT_INC(tcps_drops); 2133 /* Drop the connection. */ 2134 switch (tp->t_state) { 2135 case TCPS_SYN_RECEIVED: 2136 so->so_error = ECONNREFUSED; 2137 goto close; 2138 case TCPS_ESTABLISHED: 2139 case TCPS_FIN_WAIT_1: 2140 case TCPS_FIN_WAIT_2: 2141 case TCPS_CLOSE_WAIT: 2142 case TCPS_CLOSING: 2143 case TCPS_LAST_ACK: 2144 so->so_error = ECONNRESET; 2145 close: 2146 /* FALLTHROUGH */ 2147 default: 2148 tp = tcp_close(tp); 2149 } 2150 } else { 2151 TCPSTAT_INC(tcps_badrst); 2152 /* Send challenge ACK. */ 2153 tcp_respond(tp, mtod(m, void *), th, m, 2154 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2155 tp->last_ack_sent = tp->rcv_nxt; 2156 m = NULL; 2157 } 2158 } 2159 goto drop; 2160 } 2161 2162 /* 2163 * RFC5961 Section 4.2 2164 * Send challenge ACK for any SYN in synchronized state. 2165 */ 2166 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT && 2167 tp->t_state != TCPS_SYN_RECEIVED) { 2168 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2169 2170 TCPSTAT_INC(tcps_badsyn); 2171 if (V_tcp_insecure_syn && 2172 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2173 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2174 tp = tcp_drop(tp, ECONNRESET); 2175 rstreason = BANDLIM_UNLIMITED; 2176 } else { 2177 /* Send challenge ACK. */ 2178 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2179 tp->snd_nxt, TH_ACK); 2180 tp->last_ack_sent = tp->rcv_nxt; 2181 m = NULL; 2182 } 2183 goto drop; 2184 } 2185 2186 /* 2187 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2188 * and it's less than ts_recent, drop it. 
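 * (PAWS: Protection Against Wrapped Sequence numbers.  A segment
 * failing this test is presumed to be an old duplicate, unless
 * ts_recent itself has gone stale, which is handled just below.)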
2189 */ 2190 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2191 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2192 2193 /* Check to see if ts_recent is over 24 days old. */ 2194 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2195 /* 2196 * Invalidate ts_recent. If this segment updates 2197 * ts_recent, the age will be reset later and ts_recent 2198 * will get a valid value. If it does not, setting 2199 * ts_recent to zero will at least satisfy the 2200 * requirement that zero be placed in the timestamp 2201 * echo reply when ts_recent isn't valid. The 2202 * age isn't reset until we get a valid ts_recent 2203 * because we don't want out-of-order segments to be 2204 * dropped when ts_recent is old. 2205 */ 2206 tp->ts_recent = 0; 2207 } else { 2208 TCPSTAT_INC(tcps_rcvduppack); 2209 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2210 TCPSTAT_INC(tcps_pawsdrop); 2211 if (tlen) 2212 goto dropafterack; 2213 goto drop; 2214 } 2215 } 2216 2217 /* 2218 * In the SYN-RECEIVED state, validate that the packet belongs to 2219 * this connection before trimming the data to fit the receive 2220 * window. Check the sequence number versus IRS since we know 2221 * the sequence numbers haven't wrapped. This is a partial fix 2222 * for the "LAND" DoS attack. 2223 */ 2224 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2225 rstreason = BANDLIM_RST_OPENPORT; 2226 goto dropwithreset; 2227 } 2228 2229 todrop = tp->rcv_nxt - th->th_seq; 2230 if (todrop > 0) { 2231 if (thflags & TH_SYN) { 2232 thflags &= ~TH_SYN; 2233 th->th_seq++; 2234 if (th->th_urp > 1) 2235 th->th_urp--; 2236 else 2237 thflags &= ~TH_URG; 2238 todrop--; 2239 } 2240 /* 2241 * Following if statement from Stevens, vol. 2, p. 960. 2242 */ 2243 if (todrop > tlen 2244 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2245 /* 2246 * Any valid FIN must be to the left of the window. 2247 * At this point the FIN must be a duplicate or out 2248 * of sequence; drop it. 2249 */ 2250 thflags &= ~TH_FIN; 2251 2252 /* 2253 * Send an ACK to resynchronize and drop any data. 2254 * But keep on processing for RST or ACK. 2255 */ 2256 tp->t_flags |= TF_ACKNOW; 2257 todrop = tlen; 2258 TCPSTAT_INC(tcps_rcvduppack); 2259 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2260 } else { 2261 TCPSTAT_INC(tcps_rcvpartduppack); 2262 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2263 } 2264 drop_hdrlen += todrop; /* drop from the top afterwards */ 2265 th->th_seq += todrop; 2266 tlen -= todrop; 2267 if (th->th_urp > todrop) 2268 th->th_urp -= todrop; 2269 else { 2270 thflags &= ~TH_URG; 2271 th->th_urp = 0; 2272 } 2273 } 2274 2275 /* 2276 * If new data are received on a connection after the 2277 * user processes are gone, then RST the other end. 2278 */ 2279 if ((so->so_state & SS_NOFDREF) && 2280 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2281 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2282 2283 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2284 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2285 "after socket was closed, " 2286 "sending RST and removing tcpcb\n", 2287 s, __func__, tcpstates[tp->t_state], tlen); 2288 free(s, M_TCPLOG); 2289 } 2290 tp = tcp_close(tp); 2291 TCPSTAT_INC(tcps_rcvafterclose); 2292 rstreason = BANDLIM_UNLIMITED; 2293 goto dropwithreset; 2294 } 2295 2296 /* 2297 * If segment ends after window, drop trailing data 2298 * (and PUSH and FIN); if nothing left, just ACK. 
2299 */ 2300 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2301 if (todrop > 0) { 2302 TCPSTAT_INC(tcps_rcvpackafterwin); 2303 if (todrop >= tlen) { 2304 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2305 /* 2306 * If window is closed can only take segments at 2307 * window edge, and have to drop data and PUSH from 2308 * incoming segments. Continue processing, but 2309 * remember to ack. Otherwise, drop segment 2310 * and ack. 2311 */ 2312 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2313 tp->t_flags |= TF_ACKNOW; 2314 TCPSTAT_INC(tcps_rcvwinprobe); 2315 } else 2316 goto dropafterack; 2317 } else 2318 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2319 m_adj(m, -todrop); 2320 tlen -= todrop; 2321 thflags &= ~(TH_PUSH|TH_FIN); 2322 } 2323 2324 /* 2325 * If last ACK falls within this segment's sequence numbers, 2326 * record its timestamp. 2327 * NOTE: 2328 * 1) That the test incorporates suggestions from the latest 2329 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2330 * 2) That updating only on newer timestamps interferes with 2331 * our earlier PAWS tests, so this check should be solely 2332 * predicated on the sequence space of this segment. 2333 * 3) That we modify the segment boundary check to be 2334 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2335 * instead of RFC1323's 2336 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2337 * This modified check allows us to overcome RFC1323's 2338 * limitations as described in Stevens TCP/IP Illustrated 2339 * Vol. 2 p.869. In such cases, we can still calculate the 2340 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2341 */ 2342 if ((to.to_flags & TOF_TS) != 0 && 2343 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2344 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2345 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2346 tp->ts_recent_age = tcp_ts_getticks(); 2347 tp->ts_recent = to.to_tsval; 2348 } 2349 2350 /* 2351 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2352 * flag is on (half-synchronized state), then queue data for 2353 * later processing; else drop segment and return. 2354 */ 2355 if ((thflags & TH_ACK) == 0) { 2356 if (tp->t_state == TCPS_SYN_RECEIVED || 2357 (tp->t_flags & TF_NEEDSYN)) { 2358 if (tp->t_state == TCPS_SYN_RECEIVED && 2359 IS_FASTOPEN(tp->t_flags)) { 2360 tp->snd_wnd = tiwin; 2361 cc_conn_init(tp); 2362 } 2363 goto step6; 2364 } else if (tp->t_flags & TF_ACKNOW) 2365 goto dropafterack; 2366 else 2367 goto drop; 2368 } 2369 2370 /* 2371 * Ack processing. 2372 */ 2373 switch (tp->t_state) { 2374 2375 /* 2376 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2377 * ESTABLISHED state and continue processing. 2378 * The ACK was checked above. 2379 */ 2380 case TCPS_SYN_RECEIVED: 2381 2382 TCPSTAT_INC(tcps_connects); 2383 soisconnected(so); 2384 /* Do window scaling? */ 2385 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2386 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2387 tp->rcv_scale = tp->request_r_scale; 2388 } 2389 tp->snd_wnd = tiwin; 2390 /* 2391 * Make transitions: 2392 * SYN-RECEIVED -> ESTABLISHED 2393 * SYN-RECEIVED* -> FIN-WAIT-1 2394 */ 2395 tp->t_starttime = ticks; 2396 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 2397 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2398 tp->t_tfo_pending = NULL; 2399 2400 /* 2401 * Account for the ACK of our SYN prior to 2402 * regular ACK processing below. 
2403 */ 2404 tp->snd_una++; 2405 } 2406 if (tp->t_flags & TF_NEEDFIN) { 2407 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2408 tp->t_flags &= ~TF_NEEDFIN; 2409 } else { 2410 tcp_state_change(tp, TCPS_ESTABLISHED); 2411 TCP_PROBE5(accept__established, NULL, tp, 2412 m, tp, th); 2413 /* 2414 * TFO connections call cc_conn_init() during SYN 2415 * processing. Calling it again here for such 2416 * connections is not harmless as it would undo the 2417 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2418 * is retransmitted. 2419 */ 2420 if (!IS_FASTOPEN(tp->t_flags)) 2421 cc_conn_init(tp); 2422 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2423 } 2424 /* 2425 * If segment contains data or ACK, will call tcp_reass() 2426 * later; if not, do so now to pass queued data to user. 2427 */ 2428 if (tlen == 0 && (thflags & TH_FIN) == 0) 2429 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 2430 (struct mbuf *)0); 2431 tp->snd_wl1 = th->th_seq - 1; 2432 /* FALLTHROUGH */ 2433 2434 /* 2435 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2436 * ACKs. If the ack is in the range 2437 * tp->snd_una < th->th_ack <= tp->snd_max 2438 * then advance tp->snd_una to th->th_ack and drop 2439 * data from the retransmission queue. If this ACK reflects 2440 * more up to date window information we update our window information. 2441 */ 2442 case TCPS_ESTABLISHED: 2443 case TCPS_FIN_WAIT_1: 2444 case TCPS_FIN_WAIT_2: 2445 case TCPS_CLOSE_WAIT: 2446 case TCPS_CLOSING: 2447 case TCPS_LAST_ACK: 2448 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2449 TCPSTAT_INC(tcps_rcvacktoomuch); 2450 goto dropafterack; 2451 } 2452 if ((tp->t_flags & TF_SACK_PERMIT) && 2453 ((to.to_flags & TOF_SACK) || 2454 !TAILQ_EMPTY(&tp->snd_holes))) 2455 sack_changed = tcp_sack_doack(tp, &to, th->th_ack); 2456 else 2457 /* 2458 * Reset the value so that previous (valid) value 2459 * from the last ack with SACK doesn't get used. 2460 */ 2461 tp->sackhint.sacked_bytes = 0; 2462 2463 #ifdef TCP_HHOOK 2464 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2465 hhook_run_tcp_est_in(tp, th, &to); 2466 #endif 2467 2468 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2469 u_int maxseg; 2470 2471 maxseg = tcp_maxseg(tp); 2472 if (tlen == 0 && 2473 (tiwin == tp->snd_wnd || 2474 (tp->t_flags & TF_SACK_PERMIT))) { 2475 /* 2476 * If this is the first time we've seen a 2477 * FIN from the remote, this is not a 2478 * duplicate and it needs to be processed 2479 * normally. This happens during a 2480 * simultaneous close. 2481 */ 2482 if ((thflags & TH_FIN) && 2483 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2484 tp->t_dupacks = 0; 2485 break; 2486 } 2487 TCPSTAT_INC(tcps_rcvdupack); 2488 /* 2489 * If we have outstanding data (other than 2490 * a window probe), this is a completely 2491 * duplicate ack (ie, window info didn't 2492 * change and FIN isn't set), 2493 * the ack is the biggest we've 2494 * seen and we've seen exactly our rexmt 2495 * threshold of them, assume a packet 2496 * has been dropped and retransmit it. 2497 * Kludge snd_nxt & the congestion 2498 * window so we send only this one 2499 * packet. 2500 * 2501 * We know we're losing at the current 2502 * window size so do congestion avoidance 2503 * (set ssthresh to half the current window 2504 * and pull our congestion window back to 2505 * the new ssthresh). 2506 * 2507 * Dup acks mean that packets have left the 2508 * network (they're now cached at the receiver) 2509 * so bump cwnd by the amount in the receiver 2510 * to keep a constant cwnd packets in the 2511 * network. 
2512 * 2513 * When using TCP ECN, notify the peer that 2514 * we reduced the cwnd. 2515 */ 2516 /* 2517 * Following 2 kinds of acks should not affect 2518 * dupack counting: 2519 * 1) Old acks 2520 * 2) Acks with SACK but without any new SACK 2521 * information in them. These could result from 2522 * any anomaly in the network like a switch 2523 * duplicating packets or a possible DoS attack. 2524 */ 2525 if (th->th_ack != tp->snd_una || 2526 ((tp->t_flags & TF_SACK_PERMIT) && 2527 !sack_changed)) 2528 break; 2529 else if (!tcp_timer_active(tp, TT_REXMT)) 2530 tp->t_dupacks = 0; 2531 else if (++tp->t_dupacks > tcprexmtthresh || 2532 IN_FASTRECOVERY(tp->t_flags)) { 2533 cc_ack_received(tp, th, nsegs, 2534 CC_DUPACK); 2535 if ((tp->t_flags & TF_SACK_PERMIT) && 2536 IN_FASTRECOVERY(tp->t_flags)) { 2537 int awnd; 2538 2539 /* 2540 * Compute the amount of data in flight first. 2541 * We can inject new data into the pipe iff 2542 * we have less than 1/2 the original window's 2543 * worth of data in flight. 2544 */ 2545 if (V_tcp_do_rfc6675_pipe) 2546 awnd = tcp_compute_pipe(tp); 2547 else 2548 awnd = (tp->snd_nxt - tp->snd_fack) + 2549 tp->sackhint.sack_bytes_rexmit; 2550 2551 if (awnd < tp->snd_ssthresh) { 2552 tp->snd_cwnd += maxseg; 2553 if (tp->snd_cwnd > tp->snd_ssthresh) 2554 tp->snd_cwnd = tp->snd_ssthresh; 2555 } 2556 } else 2557 tp->snd_cwnd += maxseg; 2558 (void) tp->t_fb->tfb_tcp_output(tp); 2559 goto drop; 2560 } else if (tp->t_dupacks == tcprexmtthresh) { 2561 tcp_seq onxt = tp->snd_nxt; 2562 2563 /* 2564 * If we're doing sack, check to 2565 * see if we're already in sack 2566 * recovery. If we're not doing sack, 2567 * check to see if we're in newreno 2568 * recovery. 2569 */ 2570 if (tp->t_flags & TF_SACK_PERMIT) { 2571 if (IN_FASTRECOVERY(tp->t_flags)) { 2572 tp->t_dupacks = 0; 2573 break; 2574 } 2575 } else { 2576 if (SEQ_LEQ(th->th_ack, 2577 tp->snd_recover)) { 2578 tp->t_dupacks = 0; 2579 break; 2580 } 2581 } 2582 /* Congestion signal before ack. */ 2583 cc_cong_signal(tp, th, CC_NDUPACK); 2584 cc_ack_received(tp, th, nsegs, 2585 CC_DUPACK); 2586 tcp_timer_activate(tp, TT_REXMT, 0); 2587 tp->t_rtttime = 0; 2588 if (tp->t_flags & TF_SACK_PERMIT) { 2589 TCPSTAT_INC( 2590 tcps_sack_recovery_episode); 2591 tp->sack_newdata = tp->snd_nxt; 2592 tp->snd_cwnd = maxseg; 2593 (void) tp->t_fb->tfb_tcp_output(tp); 2594 goto drop; 2595 } 2596 tp->snd_nxt = th->th_ack; 2597 tp->snd_cwnd = maxseg; 2598 (void) tp->t_fb->tfb_tcp_output(tp); 2599 KASSERT(tp->snd_limited <= 2, 2600 ("%s: tp->snd_limited too big", 2601 __func__)); 2602 tp->snd_cwnd = tp->snd_ssthresh + 2603 maxseg * 2604 (tp->t_dupacks - tp->snd_limited); 2605 if (SEQ_GT(onxt, tp->snd_nxt)) 2606 tp->snd_nxt = onxt; 2607 goto drop; 2608 } else if (V_tcp_do_rfc3042) { 2609 /* 2610 * Process first and second duplicate 2611 * ACKs. Each indicates a segment 2612 * leaving the network, creating room 2613 * for more. Make sure we can send a 2614 * packet on reception of each duplicate 2615 * ACK by increasing snd_cwnd by one 2616 * segment. Restore the original 2617 * snd_cwnd after packet transmission. 
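 * This branch is the Limited Transmit algorithm of RFC 3042,
 * enabled via V_tcp_do_rfc3042.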
2618 */ 2619 cc_ack_received(tp, th, nsegs, 2620 CC_DUPACK); 2621 uint32_t oldcwnd = tp->snd_cwnd; 2622 tcp_seq oldsndmax = tp->snd_max; 2623 u_int sent; 2624 int avail; 2625 2626 KASSERT(tp->t_dupacks == 1 || 2627 tp->t_dupacks == 2, 2628 ("%s: dupacks not 1 or 2", 2629 __func__)); 2630 if (tp->t_dupacks == 1) 2631 tp->snd_limited = 0; 2632 tp->snd_cwnd = 2633 (tp->snd_nxt - tp->snd_una) + 2634 (tp->t_dupacks - tp->snd_limited) * 2635 maxseg; 2636 /* 2637 * Only call tcp_output when there 2638 * is new data available to be sent. 2639 * Otherwise we would send pure ACKs. 2640 */ 2641 SOCKBUF_LOCK(&so->so_snd); 2642 avail = sbavail(&so->so_snd) - 2643 (tp->snd_nxt - tp->snd_una); 2644 SOCKBUF_UNLOCK(&so->so_snd); 2645 if (avail > 0) 2646 (void) tp->t_fb->tfb_tcp_output(tp); 2647 sent = tp->snd_max - oldsndmax; 2648 if (sent > maxseg) { 2649 KASSERT((tp->t_dupacks == 2 && 2650 tp->snd_limited == 0) || 2651 (sent == maxseg + 1 && 2652 tp->t_flags & TF_SENTFIN), 2653 ("%s: sent too much", 2654 __func__)); 2655 tp->snd_limited = 2; 2656 } else if (sent > 0) 2657 ++tp->snd_limited; 2658 tp->snd_cwnd = oldcwnd; 2659 goto drop; 2660 } 2661 } 2662 break; 2663 } else { 2664 /* 2665 * This ack is advancing the left edge, reset the 2666 * counter. 2667 */ 2668 tp->t_dupacks = 0; 2669 /* 2670 * If this ack also has new SACK info, increment the 2671 * counter as per rfc6675. 2672 */ 2673 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed) 2674 tp->t_dupacks++; 2675 } 2676 2677 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2678 ("%s: th_ack <= snd_una", __func__)); 2679 2680 /* 2681 * If the congestion window was inflated to account 2682 * for the other side's cached packets, retract it. 2683 */ 2684 if (IN_FASTRECOVERY(tp->t_flags)) { 2685 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2686 if (tp->t_flags & TF_SACK_PERMIT) 2687 tcp_sack_partialack(tp, th); 2688 else 2689 tcp_newreno_partial_ack(tp, th); 2690 } else 2691 cc_post_recovery(tp, th); 2692 } 2693 /* 2694 * If we reach this point, ACK is not a duplicate, 2695 * i.e., it ACKs something we sent. 2696 */ 2697 if (tp->t_flags & TF_NEEDSYN) { 2698 /* 2699 * T/TCP: Connection was half-synchronized, and our 2700 * SYN has been ACK'd (so connection is now fully 2701 * synchronized). Go to non-starred state, 2702 * increment snd_una for ACK of SYN, and check if 2703 * we can do window scaling. 2704 */ 2705 tp->t_flags &= ~TF_NEEDSYN; 2706 tp->snd_una++; 2707 /* Do window scaling? */ 2708 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2709 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2710 tp->rcv_scale = tp->request_r_scale; 2711 /* Send window already scaled. */ 2712 } 2713 } 2714 2715 process_ACK: 2716 INP_WLOCK_ASSERT(tp->t_inpcb); 2717 2718 acked = BYTES_THIS_ACK(tp, th); 2719 KASSERT(acked >= 0, ("%s: acked unexepectedly negative " 2720 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__, 2721 tp->snd_una, th->th_ack, tp, m)); 2722 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 2723 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2724 2725 /* 2726 * If we just performed our first retransmit, and the ACK 2727 * arrives within our recovery window, then it was a mistake 2728 * to do the retransmit in the first place. Recover our 2729 * original cwnd and ssthresh, and proceed to transmit where 2730 * we left off. 2731 */ 2732 if (tp->t_rxtshift == 1 && 2733 tp->t_flags & TF_PREVVALID && 2734 tp->t_badrxtwin && 2735 SEQ_LT(to.to_tsecr, tp->t_badrxtwin)) 2736 cc_cong_signal(tp, th, CC_RTO_ERR); 2737 2738 /* 2739 * If we have a timestamp reply, update smoothed 2740 * round trip time. 
If no timestamp is present but 2741 * transmit timer is running and timed sequence 2742 * number was acked, update smoothed round trip time. 2743 * Since we now have an rtt measurement, cancel the 2744 * timer backoff (cf., Phil Karn's retransmit alg.). 2745 * Recompute the initial retransmit timer. 2746 * 2747 * Some boxes send broken timestamp replies 2748 * during the SYN+ACK phase, ignore 2749 * timestamps of 0 or we could calculate a 2750 * huge RTT and blow up the retransmit timer. 2751 */ 2752 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2753 uint32_t t; 2754 2755 t = tcp_ts_getticks() - to.to_tsecr; 2756 if (!tp->t_rttlow || tp->t_rttlow > t) 2757 tp->t_rttlow = t; 2758 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2759 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2760 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2761 tp->t_rttlow = ticks - tp->t_rtttime; 2762 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2763 } 2764 2765 /* 2766 * If all outstanding data is acked, stop retransmit 2767 * timer and remember to restart (more output or persist). 2768 * If there is more data to be acked, restart retransmit 2769 * timer, using current (possibly backed-off) value. 2770 */ 2771 if (th->th_ack == tp->snd_max) { 2772 tcp_timer_activate(tp, TT_REXMT, 0); 2773 needoutput = 1; 2774 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2775 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2776 2777 /* 2778 * If no data (only SYN) was ACK'd, 2779 * skip rest of ACK processing. 2780 */ 2781 if (acked == 0) 2782 goto step6; 2783 2784 /* 2785 * Let the congestion control algorithm update congestion 2786 * control related information. This typically means increasing 2787 * the congestion window. 2788 */ 2789 cc_ack_received(tp, th, nsegs, CC_ACK); 2790 2791 SOCKBUF_LOCK(&so->so_snd); 2792 if (acked > sbavail(&so->so_snd)) { 2793 if (tp->snd_wnd >= sbavail(&so->so_snd)) 2794 tp->snd_wnd -= sbavail(&so->so_snd); 2795 else 2796 tp->snd_wnd = 0; 2797 mfree = sbcut_locked(&so->so_snd, 2798 (int)sbavail(&so->so_snd)); 2799 ourfinisacked = 1; 2800 } else { 2801 mfree = sbcut_locked(&so->so_snd, acked); 2802 if (tp->snd_wnd >= (uint32_t) acked) 2803 tp->snd_wnd -= acked; 2804 else 2805 tp->snd_wnd = 0; 2806 ourfinisacked = 0; 2807 } 2808 /* NB: sowwakeup_locked() does an implicit unlock. */ 2809 sowwakeup_locked(so); 2810 m_freem(mfree); 2811 /* Detect una wraparound. */ 2812 if (!IN_RECOVERY(tp->t_flags) && 2813 SEQ_GT(tp->snd_una, tp->snd_recover) && 2814 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2815 tp->snd_recover = th->th_ack - 1; 2816 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2817 if (IN_RECOVERY(tp->t_flags) && 2818 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2819 EXIT_RECOVERY(tp->t_flags); 2820 } 2821 tp->snd_una = th->th_ack; 2822 if (tp->t_flags & TF_SACK_PERMIT) { 2823 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2824 tp->snd_recover = tp->snd_una; 2825 } 2826 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2827 tp->snd_nxt = tp->snd_una; 2828 2829 switch (tp->t_state) { 2830 2831 /* 2832 * In FIN_WAIT_1 STATE in addition to the processing 2833 * for the ESTABLISHED state if our FIN is now acknowledged 2834 * then enter FIN_WAIT_2. 2835 */ 2836 case TCPS_FIN_WAIT_1: 2837 if (ourfinisacked) { 2838 /* 2839 * If we can't receive any more 2840 * data, then closing user can proceed. 2841 * Starting the timer is contrary to the 2842 * specification, but if we don't get a FIN 2843 * we'll hang forever. 
2844 * 2845 * XXXjl: 2846 * we should release the tp also, and use a 2847 * compressed state. 2848 */ 2849 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2850 soisdisconnected(so); 2851 tcp_timer_activate(tp, TT_2MSL, 2852 (tcp_fast_finwait2_recycle ? 2853 tcp_finwait2_timeout : 2854 TP_MAXIDLE(tp))); 2855 } 2856 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2857 } 2858 break; 2859 2860 /* 2861 * In CLOSING STATE in addition to the processing for 2862 * the ESTABLISHED state if the ACK acknowledges our FIN 2863 * then enter the TIME-WAIT state, otherwise ignore 2864 * the segment. 2865 */ 2866 case TCPS_CLOSING: 2867 if (ourfinisacked) { 2868 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2869 tcp_twstart(tp); 2870 m_freem(m); 2871 return; 2872 } 2873 break; 2874 2875 /* 2876 * In LAST_ACK, we may still be waiting for data to drain 2877 * and/or to be acked, as well as for the ack of our FIN. 2878 * If our FIN is now acknowledged, delete the TCB, 2879 * enter the closed state and return. 2880 */ 2881 case TCPS_LAST_ACK: 2882 if (ourfinisacked) { 2883 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2884 tp = tcp_close(tp); 2885 goto drop; 2886 } 2887 break; 2888 } 2889 } 2890 2891 step6: 2892 INP_WLOCK_ASSERT(tp->t_inpcb); 2893 2894 /* 2895 * Update window information. 2896 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2897 */ 2898 if ((thflags & TH_ACK) && 2899 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2900 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2901 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2902 /* keep track of pure window updates */ 2903 if (tlen == 0 && 2904 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2905 TCPSTAT_INC(tcps_rcvwinupd); 2906 tp->snd_wnd = tiwin; 2907 tp->snd_wl1 = th->th_seq; 2908 tp->snd_wl2 = th->th_ack; 2909 if (tp->snd_wnd > tp->max_sndwnd) 2910 tp->max_sndwnd = tp->snd_wnd; 2911 needoutput = 1; 2912 } 2913 2914 /* 2915 * Process segments with URG. 2916 */ 2917 if ((thflags & TH_URG) && th->th_urp && 2918 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2919 /* 2920 * This is a kludge, but if we receive and accept 2921 * random urgent pointers, we'll crash in 2922 * soreceive. It's hard to imagine someone 2923 * actually wanting to send this much urgent data. 2924 */ 2925 SOCKBUF_LOCK(&so->so_rcv); 2926 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 2927 th->th_urp = 0; /* XXX */ 2928 thflags &= ~TH_URG; /* XXX */ 2929 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2930 goto dodata; /* XXX */ 2931 } 2932 /* 2933 * If this segment advances the known urgent pointer, 2934 * then mark the data stream. This should not happen 2935 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2936 * a FIN has been received from the remote side. 2937 * In these states we ignore the URG. 2938 * 2939 * According to RFC961 (Assigned Protocols), 2940 * the urgent pointer points to the last octet 2941 * of urgent data. We continue, however, 2942 * to consider it to indicate the first octet 2943 * of data past the urgent section as the original 2944 * spec states (in one of two places). 2945 */ 2946 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2947 tp->rcv_up = th->th_seq + th->th_urp; 2948 so->so_oobmark = sbavail(&so->so_rcv) + 2949 (tp->rcv_up - tp->rcv_nxt) - 1; 2950 if (so->so_oobmark == 0) 2951 so->so_rcv.sb_state |= SBS_RCVATMARK; 2952 sohasoutofband(so); 2953 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2954 } 2955 SOCKBUF_UNLOCK(&so->so_rcv); 2956 /* 2957 * Remove out of band data so doesn't get presented to user. 
2958 * This can happen independent of advancing the URG pointer, 2959 * but if two URG's are pending at once, some out-of-band 2960 * data may creep in... ick. 2961 */ 2962 if (th->th_urp <= (uint32_t)tlen && 2963 !(so->so_options & SO_OOBINLINE)) { 2964 /* hdr drop is delayed */ 2965 tcp_pulloutofband(so, th, m, drop_hdrlen); 2966 } 2967 } else { 2968 /* 2969 * If no out of band data is expected, 2970 * pull receive urgent pointer along 2971 * with the receive window. 2972 */ 2973 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2974 tp->rcv_up = tp->rcv_nxt; 2975 } 2976 dodata: /* XXX */ 2977 INP_WLOCK_ASSERT(tp->t_inpcb); 2978 2979 /* 2980 * Process the segment text, merging it into the TCP sequencing queue, 2981 * and arranging for acknowledgment of receipt if necessary. 2982 * This process logically involves adjusting tp->rcv_wnd as data 2983 * is presented to the user (this happens in tcp_usrreq.c, 2984 * case PRU_RCVD). If a FIN has already been received on this 2985 * connection then we just ignore the text. 2986 */ 2987 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 2988 IS_FASTOPEN(tp->t_flags)); 2989 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 2990 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2991 tcp_seq save_start = th->th_seq; 2992 m_adj(m, drop_hdrlen); /* delayed header drop */ 2993 /* 2994 * Insert segment which includes th into TCP reassembly queue 2995 * with control block tp. Set thflags to whether reassembly now 2996 * includes a segment with FIN. This handles the common case 2997 * inline (segment is the next to be received on an established 2998 * connection, and the queue is empty), avoiding linkage into 2999 * and removal from the queue and repetition of various 3000 * conversions. 3001 * Set DELACK for segments received in order, but ack 3002 * immediately when segments are out of order (so 3003 * fast retransmit can work). 3004 */ 3005 if (th->th_seq == tp->rcv_nxt && 3006 SEGQ_EMPTY(tp) && 3007 (TCPS_HAVEESTABLISHED(tp->t_state) || 3008 tfo_syn)) { 3009 if (DELAY_ACK(tp, tlen) || tfo_syn) 3010 tp->t_flags |= TF_DELACK; 3011 else 3012 tp->t_flags |= TF_ACKNOW; 3013 tp->rcv_nxt += tlen; 3014 thflags = th->th_flags & TH_FIN; 3015 TCPSTAT_INC(tcps_rcvpack); 3016 TCPSTAT_ADD(tcps_rcvbyte, tlen); 3017 SOCKBUF_LOCK(&so->so_rcv); 3018 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 3019 m_freem(m); 3020 else 3021 sbappendstream_locked(&so->so_rcv, m, 0); 3022 /* NB: sorwakeup_locked() does an implicit unlock. */ 3023 sorwakeup_locked(so); 3024 } else { 3025 /* 3026 * XXX: Due to the header drop above "th" is 3027 * theoretically invalid by now. Fortunately 3028 * m_adj() doesn't actually frees any mbufs 3029 * when trimming from the head. 3030 */ 3031 thflags = tcp_reass(tp, th, &save_start, &tlen, m); 3032 tp->t_flags |= TF_ACKNOW; 3033 } 3034 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 3035 tcp_update_sack_list(tp, save_start, save_start + tlen); 3036 #if 0 3037 /* 3038 * Note the amount of data that peer has sent into 3039 * our window, in order to estimate the sender's 3040 * buffer size. 3041 * XXX: Unused. 3042 */ 3043 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3044 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3045 else 3046 len = so->so_rcv.sb_hiwat; 3047 #endif 3048 } else { 3049 m_freem(m); 3050 thflags &= ~TH_FIN; 3051 } 3052 3053 /* 3054 * If FIN is received ACK the FIN and let the user know 3055 * that the connection is closing. 
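 * The FIN consumes one sequence number, so rcv_nxt is advanced
 * past it below.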
3056 */ 3057 if (thflags & TH_FIN) { 3058 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3059 socantrcvmore(so); 3060 /* 3061 * If connection is half-synchronized 3062 * (ie NEEDSYN flag on) then delay ACK, 3063 * so it may be piggybacked when SYN is sent. 3064 * Otherwise, since we received a FIN then no 3065 * more input can be expected, send ACK now. 3066 */ 3067 if (tp->t_flags & TF_NEEDSYN) 3068 tp->t_flags |= TF_DELACK; 3069 else 3070 tp->t_flags |= TF_ACKNOW; 3071 tp->rcv_nxt++; 3072 } 3073 switch (tp->t_state) { 3074 3075 /* 3076 * In SYN_RECEIVED and ESTABLISHED STATES 3077 * enter the CLOSE_WAIT state. 3078 */ 3079 case TCPS_SYN_RECEIVED: 3080 tp->t_starttime = ticks; 3081 /* FALLTHROUGH */ 3082 case TCPS_ESTABLISHED: 3083 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3084 break; 3085 3086 /* 3087 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3088 * enter the CLOSING state. 3089 */ 3090 case TCPS_FIN_WAIT_1: 3091 tcp_state_change(tp, TCPS_CLOSING); 3092 break; 3093 3094 /* 3095 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3096 * starting the time-wait timer, turning off the other 3097 * standard timers. 3098 */ 3099 case TCPS_FIN_WAIT_2: 3100 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 3101 3102 tcp_twstart(tp); 3103 return; 3104 } 3105 } 3106 #ifdef TCPDEBUG 3107 if (so->so_options & SO_DEBUG) 3108 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3109 &tcp_savetcp, 0); 3110 #endif 3111 TCP_PROBE3(debug__input, tp, th, m); 3112 3113 /* 3114 * Return any desired output. 3115 */ 3116 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3117 (void) tp->t_fb->tfb_tcp_output(tp); 3118 3119 check_delack: 3120 INP_WLOCK_ASSERT(tp->t_inpcb); 3121 3122 if (tp->t_flags & TF_DELACK) { 3123 tp->t_flags &= ~TF_DELACK; 3124 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3125 } 3126 INP_WUNLOCK(tp->t_inpcb); 3127 return; 3128 3129 dropafterack: 3130 /* 3131 * Generate an ACK dropping incoming segment if it occupies 3132 * sequence space, where the ACK reflects our state. 3133 * 3134 * We can now skip the test for the RST flag since all 3135 * paths to this code happen after packets containing 3136 * RST have been dropped. 3137 * 3138 * In the SYN-RECEIVED state, don't send an ACK unless the 3139 * segment we received passes the SYN-RECEIVED ACK test. 3140 * If it fails send a RST. This breaks the loop in the 3141 * "LAND" DoS attack, and also prevents an ACK storm 3142 * between two listening ports that have been sent forged 3143 * SYN segments, each with the source address of the other. 3144 */ 3145 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3146 (SEQ_GT(tp->snd_una, th->th_ack) || 3147 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3148 rstreason = BANDLIM_RST_OPENPORT; 3149 goto dropwithreset; 3150 } 3151 #ifdef TCPDEBUG 3152 if (so->so_options & SO_DEBUG) 3153 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3154 &tcp_savetcp, 0); 3155 #endif 3156 TCP_PROBE3(debug__input, tp, th, m); 3157 tp->t_flags |= TF_ACKNOW; 3158 (void) tp->t_fb->tfb_tcp_output(tp); 3159 INP_WUNLOCK(tp->t_inpcb); 3160 m_freem(m); 3161 return; 3162 3163 dropwithreset: 3164 if (tp != NULL) { 3165 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3166 INP_WUNLOCK(tp->t_inpcb); 3167 } else 3168 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3169 return; 3170 3171 drop: 3172 /* 3173 * Drop space held by incoming segment and return. 
3174 */ 3175 #ifdef TCPDEBUG 3176 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3177 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3178 &tcp_savetcp, 0); 3179 #endif 3180 TCP_PROBE3(debug__input, tp, th, m); 3181 if (tp != NULL) 3182 INP_WUNLOCK(tp->t_inpcb); 3183 m_freem(m); 3184 } 3185 3186 /* 3187 * Issue RST and make ACK acceptable to originator of segment. 3188 * The mbuf must still include the original packet header. 3189 * tp may be NULL. 3190 */ 3191 void 3192 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3193 int tlen, int rstreason) 3194 { 3195 #ifdef INET 3196 struct ip *ip; 3197 #endif 3198 #ifdef INET6 3199 struct ip6_hdr *ip6; 3200 #endif 3201 3202 if (tp != NULL) { 3203 INP_WLOCK_ASSERT(tp->t_inpcb); 3204 } 3205 3206 /* Don't bother if destination was broadcast/multicast. */ 3207 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3208 goto drop; 3209 #ifdef INET6 3210 if (mtod(m, struct ip *)->ip_v == 6) { 3211 ip6 = mtod(m, struct ip6_hdr *); 3212 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3213 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3214 goto drop; 3215 /* IPv6 anycast check is done at tcp6_input() */ 3216 } 3217 #endif 3218 #if defined(INET) && defined(INET6) 3219 else 3220 #endif 3221 #ifdef INET 3222 { 3223 ip = mtod(m, struct ip *); 3224 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3225 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3226 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3227 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3228 goto drop; 3229 } 3230 #endif 3231 3232 /* Perform bandwidth limiting. */ 3233 if (badport_bandlim(rstreason) < 0) 3234 goto drop; 3235 3236 /* tcp_respond consumes the mbuf chain. */ 3237 if (th->th_flags & TH_ACK) { 3238 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3239 th->th_ack, TH_RST); 3240 } else { 3241 if (th->th_flags & TH_SYN) 3242 tlen++; 3243 if (th->th_flags & TH_FIN) 3244 tlen++; 3245 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3246 (tcp_seq)0, TH_RST|TH_ACK); 3247 } 3248 return; 3249 drop: 3250 m_freem(m); 3251 } 3252 3253 /* 3254 * Parse TCP options and place in tcpopt. 
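 * The TO_SYN bit in 'flags' marks options parsed from a SYN segment:
 * options only valid on a SYN (MSS, window scale, SACK permitted,
 * TCP Fast Open) are ignored without it, while SACK blocks are only
 * accepted when it is absent.  Parsing stops at EOL or on a
 * malformed option length.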
3255 */ 3256 void 3257 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3258 { 3259 int opt, optlen; 3260 3261 to->to_flags = 0; 3262 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3263 opt = cp[0]; 3264 if (opt == TCPOPT_EOL) 3265 break; 3266 if (opt == TCPOPT_NOP) 3267 optlen = 1; 3268 else { 3269 if (cnt < 2) 3270 break; 3271 optlen = cp[1]; 3272 if (optlen < 2 || optlen > cnt) 3273 break; 3274 } 3275 switch (opt) { 3276 case TCPOPT_MAXSEG: 3277 if (optlen != TCPOLEN_MAXSEG) 3278 continue; 3279 if (!(flags & TO_SYN)) 3280 continue; 3281 to->to_flags |= TOF_MSS; 3282 bcopy((char *)cp + 2, 3283 (char *)&to->to_mss, sizeof(to->to_mss)); 3284 to->to_mss = ntohs(to->to_mss); 3285 break; 3286 case TCPOPT_WINDOW: 3287 if (optlen != TCPOLEN_WINDOW) 3288 continue; 3289 if (!(flags & TO_SYN)) 3290 continue; 3291 to->to_flags |= TOF_SCALE; 3292 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3293 break; 3294 case TCPOPT_TIMESTAMP: 3295 if (optlen != TCPOLEN_TIMESTAMP) 3296 continue; 3297 to->to_flags |= TOF_TS; 3298 bcopy((char *)cp + 2, 3299 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3300 to->to_tsval = ntohl(to->to_tsval); 3301 bcopy((char *)cp + 6, 3302 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3303 to->to_tsecr = ntohl(to->to_tsecr); 3304 break; 3305 case TCPOPT_SIGNATURE: 3306 /* 3307 * In order to reply to a host which has set the 3308 * TCP_SIGNATURE option in its initial SYN, we have 3309 * to record the fact that the option was observed 3310 * here for the syncache code to perform the correct 3311 * response. 3312 */ 3313 if (optlen != TCPOLEN_SIGNATURE) 3314 continue; 3315 to->to_flags |= TOF_SIGNATURE; 3316 to->to_signature = cp + 2; 3317 break; 3318 case TCPOPT_SACK_PERMITTED: 3319 if (optlen != TCPOLEN_SACK_PERMITTED) 3320 continue; 3321 if (!(flags & TO_SYN)) 3322 continue; 3323 if (!V_tcp_do_sack) 3324 continue; 3325 to->to_flags |= TOF_SACKPERM; 3326 break; 3327 case TCPOPT_SACK: 3328 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3329 continue; 3330 if (flags & TO_SYN) 3331 continue; 3332 to->to_flags |= TOF_SACK; 3333 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3334 to->to_sacks = cp + 2; 3335 TCPSTAT_INC(tcps_sack_rcv_blocks); 3336 break; 3337 case TCPOPT_FAST_OPEN: 3338 /* 3339 * Cookie length validation is performed by the 3340 * server side cookie checking code or the client 3341 * side cookie cache update code. 3342 */ 3343 if (!(flags & TO_SYN)) 3344 continue; 3345 if (!V_tcp_fastopen_client_enable && 3346 !V_tcp_fastopen_server_enable) 3347 continue; 3348 to->to_flags |= TOF_FASTOPEN; 3349 to->to_tfo_len = optlen - 2; 3350 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3351 break; 3352 default: 3353 continue; 3354 } 3355 } 3356 } 3357 3358 /* 3359 * Pull out of band byte out of a segment so 3360 * it doesn't appear in the user's data queue. 3361 * It is still reflected in the segment length for 3362 * sequencing purposes. 
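 * The urgent byte is stashed in tp->t_iobc and TCPOOB_HAVEDATA is
 * set; the bytes following it in the mbuf are shifted down to close
 * the gap.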
3363 */ 3364 void 3365 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3366 int off) 3367 { 3368 int cnt = off + th->th_urp - 1; 3369 3370 while (cnt >= 0) { 3371 if (m->m_len > cnt) { 3372 char *cp = mtod(m, caddr_t) + cnt; 3373 struct tcpcb *tp = sototcpcb(so); 3374 3375 INP_WLOCK_ASSERT(tp->t_inpcb); 3376 3377 tp->t_iobc = *cp; 3378 tp->t_oobflags |= TCPOOB_HAVEDATA; 3379 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3380 m->m_len--; 3381 if (m->m_flags & M_PKTHDR) 3382 m->m_pkthdr.len--; 3383 return; 3384 } 3385 cnt -= m->m_len; 3386 m = m->m_next; 3387 if (m == NULL) 3388 break; 3389 } 3390 panic("tcp_pulloutofband"); 3391 } 3392 3393 /* 3394 * Collect new round-trip time estimate 3395 * and update averages and current timeout. 3396 */ 3397 void 3398 tcp_xmit_timer(struct tcpcb *tp, int rtt) 3399 { 3400 int delta; 3401 3402 INP_WLOCK_ASSERT(tp->t_inpcb); 3403 3404 TCPSTAT_INC(tcps_rttupdated); 3405 tp->t_rttupdated++; 3406 if ((tp->t_srtt != 0) && (tp->t_rxtshift <= TCP_RTT_INVALIDATE)) { 3407 /* 3408 * srtt is stored as fixed point with 5 bits after the 3409 * binary point (i.e., scaled by 8). The following magic 3410 * is equivalent to the smoothing algorithm in rfc793 with 3411 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3412 * point). Adjust rtt to origin 0. 3413 */ 3414 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3415 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3416 3417 if ((tp->t_srtt += delta) <= 0) 3418 tp->t_srtt = 1; 3419 3420 /* 3421 * We accumulate a smoothed rtt variance (actually, a 3422 * smoothed mean difference), then set the retransmit 3423 * timer to smoothed rtt + 4 times the smoothed variance. 3424 * rttvar is stored as fixed point with 4 bits after the 3425 * binary point (scaled by 16). The following is 3426 * equivalent to rfc793 smoothing with an alpha of .75 3427 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3428 * rfc793's wired-in beta. 3429 */ 3430 if (delta < 0) 3431 delta = -delta; 3432 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3433 if ((tp->t_rttvar += delta) <= 0) 3434 tp->t_rttvar = 1; 3435 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3436 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3437 } else { 3438 /* 3439 * No rtt measurement yet - use the unsmoothed rtt. 3440 * Set the variance to half the rtt (so our first 3441 * retransmit happens at 3*rtt). 3442 */ 3443 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3444 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3445 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3446 } 3447 tp->t_rtttime = 0; 3448 tp->t_rxtshift = 0; 3449 3450 /* 3451 * the retransmit should happen at rtt + 4 * rttvar. 3452 * Because of the way we do the smoothing, srtt and rttvar 3453 * will each average +1/2 tick of bias. When we compute 3454 * the retransmit timer, we want 1/2 tick of rounding and 3455 * 1 extra tick because of +-1/2 tick uncertainty in the 3456 * firing of the timer. The bias will give us exactly the 3457 * 1.5 tick we need. But, because the bias is 3458 * statistical, we have to test that we don't drop below 3459 * the minimum feasible timer (which is 2 ticks). 3460 */ 3461 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3462 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3463 3464 /* 3465 * We received an ack for a packet that wasn't retransmitted; 3466 * it is probably safe to discard any error indications we've 3467 * received recently. 
This isn't quite right, but close enough 3468 * for now (a route might have failed after we sent a segment, 3469 * and the return path might not be symmetrical). 3470 */ 3471 tp->t_softerror = 0; 3472 } 3473 3474 /* 3475 * Determine a reasonable value for maxseg size. 3476 * If the route is known, check route for mtu. 3477 * If none, use an mss that can be handled on the outgoing interface 3478 * without forcing IP to fragment. If no route is found, route has no mtu, 3479 * or the destination isn't local, use a default, hopefully conservative 3480 * size (usually 512 or the default IP max size, but no more than the mtu 3481 * of the interface), as we can't discover anything about intervening 3482 * gateways or networks. We also initialize the congestion/slow start 3483 * window to be a single segment if the destination isn't local. 3484 * While looking at the routing entry, we also initialize other path-dependent 3485 * parameters from pre-set or cached values in the routing entry. 3486 * 3487 * NOTE that resulting t_maxseg doesn't include space for TCP options or 3488 * IP options, e.g. IPSEC data, since length of this data may vary, and 3489 * thus it is calculated for every segment separately in tcp_output(). 3490 * 3491 * NOTE that this routine is only called when we process an incoming 3492 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3493 * settings are handled in tcp_mssopt(). 3494 */ 3495 void 3496 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3497 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3498 { 3499 int mss = 0; 3500 uint32_t maxmtu = 0; 3501 struct inpcb *inp = tp->t_inpcb; 3502 struct hc_metrics_lite metrics; 3503 #ifdef INET6 3504 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3505 size_t min_protoh = isipv6 ? 3506 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3507 sizeof (struct tcpiphdr); 3508 #else 3509 const size_t min_protoh = sizeof(struct tcpiphdr); 3510 #endif 3511 3512 INP_WLOCK_ASSERT(tp->t_inpcb); 3513 3514 if (mtuoffer != -1) { 3515 KASSERT(offer == -1, ("%s: conflict", __func__)); 3516 offer = mtuoffer - min_protoh; 3517 } 3518 3519 /* Initialize. */ 3520 #ifdef INET6 3521 if (isipv6) { 3522 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3523 tp->t_maxseg = V_tcp_v6mssdflt; 3524 } 3525 #endif 3526 #if defined(INET) && defined(INET6) 3527 else 3528 #endif 3529 #ifdef INET 3530 { 3531 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3532 tp->t_maxseg = V_tcp_mssdflt; 3533 } 3534 #endif 3535 3536 /* 3537 * No route to sender, stay with default mss and return. 3538 */ 3539 if (maxmtu == 0) { 3540 /* 3541 * In case we return early we need to initialize metrics 3542 * to a defined state as tcp_hc_get() would do for us 3543 * if there was no cache hit. 3544 */ 3545 if (metricptr != NULL) 3546 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3547 return; 3548 } 3549 3550 /* What have we got? */ 3551 switch (offer) { 3552 case 0: 3553 /* 3554 * Offer == 0 means that there was no MSS on the SYN 3555 * segment, in this case we use tcp_mssdflt as 3556 * already assigned to t_maxseg above. 3557 */ 3558 offer = tp->t_maxseg; 3559 break; 3560 3561 case -1: 3562 /* 3563 * Offer == -1 means that we didn't receive SYN yet. 3564 */ 3565 /* FALLTHROUGH */ 3566 3567 default: 3568 /* 3569 * Prevent DoS attack with too small MSS. Round up 3570 * to at least minmss. 3571 */ 3572 offer = max(offer, V_tcp_minmss); 3573 } 3574 3575 /* 3576 * rmx information is now retrieved from tcp_hostcache. 
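 * On a hostcache miss tcp_hc_get() returns zeroed metrics, so each
 * rmx_* value is only used below when it is non-zero.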
3577 */ 3578 tcp_hc_get(&inp->inp_inc, &metrics); 3579 if (metricptr != NULL) 3580 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3581 3582 /* 3583 * If there's a discovered mtu in tcp hostcache, use it. 3584 * Else, use the link mtu. 3585 */ 3586 if (metrics.rmx_mtu) 3587 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3588 else { 3589 #ifdef INET6 3590 if (isipv6) { 3591 mss = maxmtu - min_protoh; 3592 if (!V_path_mtu_discovery && 3593 !in6_localaddr(&inp->in6p_faddr)) 3594 mss = min(mss, V_tcp_v6mssdflt); 3595 } 3596 #endif 3597 #if defined(INET) && defined(INET6) 3598 else 3599 #endif 3600 #ifdef INET 3601 { 3602 mss = maxmtu - min_protoh; 3603 if (!V_path_mtu_discovery && 3604 !in_localaddr(inp->inp_faddr)) 3605 mss = min(mss, V_tcp_mssdflt); 3606 } 3607 #endif 3608 /* 3609 * XXX - The above conditional (mss = maxmtu - min_protoh) 3610 * probably violates the TCP spec. 3611 * The problem is that, since we don't know the 3612 * other end's MSS, we are supposed to use a conservative 3613 * default. But, if we do that, then MTU discovery will 3614 * never actually take place, because the conservative 3615 * default is much less than the MTUs typically seen 3616 * on the Internet today. For the moment, we'll sweep 3617 * this under the carpet. 3618 * 3619 * The conservative default might not actually be a problem 3620 * if the only case this occurs is when sending an initial 3621 * SYN with options and data to a host we've never talked 3622 * to before. Then, they will reply with an MSS value which 3623 * will get recorded and the new parameters should get 3624 * recomputed. For Further Study. 3625 */ 3626 } 3627 mss = min(mss, offer); 3628 3629 /* 3630 * Sanity check: make sure that maxseg will be large 3631 * enough to allow some data on segments even if the 3632 * all the option space is used (40bytes). Otherwise 3633 * funny things may happen in tcp_output. 3634 * 3635 * XXXGL: shouldn't we reserve space for IP/IPv6 options? 3636 */ 3637 mss = max(mss, 64); 3638 3639 tp->t_maxseg = mss; 3640 } 3641 3642 void 3643 tcp_mss(struct tcpcb *tp, int offer) 3644 { 3645 int mss; 3646 uint32_t bufsize; 3647 struct inpcb *inp; 3648 struct socket *so; 3649 struct hc_metrics_lite metrics; 3650 struct tcp_ifcap cap; 3651 3652 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3653 3654 bzero(&cap, sizeof(cap)); 3655 tcp_mss_update(tp, offer, -1, &metrics, &cap); 3656 3657 mss = tp->t_maxseg; 3658 inp = tp->t_inpcb; 3659 3660 /* 3661 * If there's a pipesize, change the socket buffer to that size, 3662 * don't change if sb_hiwat is different than default (then it 3663 * has been changed on purpose with setsockopt). 3664 * Make the socket buffers an integral number of mss units; 3665 * if the mss is larger than the socket buffer, decrease the mss. 3666 */ 3667 so = inp->inp_socket; 3668 SOCKBUF_LOCK(&so->so_snd); 3669 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe) 3670 bufsize = metrics.rmx_sendpipe; 3671 else 3672 bufsize = so->so_snd.sb_hiwat; 3673 if (bufsize < mss) 3674 mss = bufsize; 3675 else { 3676 bufsize = roundup(bufsize, mss); 3677 if (bufsize > sb_max) 3678 bufsize = sb_max; 3679 if (bufsize > so->so_snd.sb_hiwat) 3680 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3681 } 3682 SOCKBUF_UNLOCK(&so->so_snd); 3683 /* 3684 * Sanity check: make sure that maxseg will be large 3685 * enough to allow some data on segments even if the 3686 * all the option space is used (40bytes). Otherwise 3687 * funny things may happen in tcp_output. 
3688 * 3689 * XXXGL: shouldn't we reserve space for IP/IPv6 options? 3690 */ 3691 tp->t_maxseg = max(mss, 64); 3692 3693 SOCKBUF_LOCK(&so->so_rcv); 3694 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe) 3695 bufsize = metrics.rmx_recvpipe; 3696 else 3697 bufsize = so->so_rcv.sb_hiwat; 3698 if (bufsize > mss) { 3699 bufsize = roundup(bufsize, mss); 3700 if (bufsize > sb_max) 3701 bufsize = sb_max; 3702 if (bufsize > so->so_rcv.sb_hiwat) 3703 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3704 } 3705 SOCKBUF_UNLOCK(&so->so_rcv); 3706 3707 /* Check the interface for TSO capabilities. */ 3708 if (cap.ifcap & CSUM_TSO) { 3709 tp->t_flags |= TF_TSO; 3710 tp->t_tsomax = cap.tsomax; 3711 tp->t_tsomaxsegcount = cap.tsomaxsegcount; 3712 tp->t_tsomaxsegsize = cap.tsomaxsegsize; 3713 } 3714 } 3715 3716 /* 3717 * Determine the MSS option to send on an outgoing SYN. 3718 */ 3719 int 3720 tcp_mssopt(struct in_conninfo *inc) 3721 { 3722 int mss = 0; 3723 uint32_t thcmtu = 0; 3724 uint32_t maxmtu = 0; 3725 size_t min_protoh; 3726 3727 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3728 3729 #ifdef INET6 3730 if (inc->inc_flags & INC_ISIPV6) { 3731 mss = V_tcp_v6mssdflt; 3732 maxmtu = tcp_maxmtu6(inc, NULL); 3733 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3734 } 3735 #endif 3736 #if defined(INET) && defined(INET6) 3737 else 3738 #endif 3739 #ifdef INET 3740 { 3741 mss = V_tcp_mssdflt; 3742 maxmtu = tcp_maxmtu(inc, NULL); 3743 min_protoh = sizeof(struct tcpiphdr); 3744 } 3745 #endif 3746 #if defined(INET6) || defined(INET) 3747 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3748 #endif 3749 3750 if (maxmtu && thcmtu) 3751 mss = min(maxmtu, thcmtu) - min_protoh; 3752 else if (maxmtu || thcmtu) 3753 mss = max(maxmtu, thcmtu) - min_protoh; 3754 3755 return (mss); 3756 } 3757 3758 3759 /* 3760 * On a partial ack arrives, force the retransmission of the 3761 * next unacknowledged segment. Do not clear tp->t_dupacks. 3762 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3763 * be started again. 3764 */ 3765 void 3766 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3767 { 3768 tcp_seq onxt = tp->snd_nxt; 3769 uint32_t ocwnd = tp->snd_cwnd; 3770 u_int maxseg = tcp_maxseg(tp); 3771 3772 INP_WLOCK_ASSERT(tp->t_inpcb); 3773 3774 tcp_timer_activate(tp, TT_REXMT, 0); 3775 tp->t_rtttime = 0; 3776 tp->snd_nxt = th->th_ack; 3777 /* 3778 * Set snd_cwnd to one segment beyond acknowledged offset. 3779 * (tp->snd_una has not yet been updated when this function is called.) 3780 */ 3781 tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th); 3782 tp->t_flags |= TF_ACKNOW; 3783 (void) tp->t_fb->tfb_tcp_output(tp); 3784 tp->snd_cwnd = ocwnd; 3785 if (SEQ_GT(onxt, tp->snd_nxt)) 3786 tp->snd_nxt = onxt; 3787 /* 3788 * Partial window deflation. Relies on fact that tp->snd_una 3789 * not updated yet. 3790 */ 3791 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th)) 3792 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th); 3793 else 3794 tp->snd_cwnd = 0; 3795 tp->snd_cwnd += maxseg; 3796 } 3797 3798 int 3799 tcp_compute_pipe(struct tcpcb *tp) 3800 { 3801 return (tp->snd_max - tp->snd_una + 3802 tp->sackhint.sack_bytes_rexmit - 3803 tp->sackhint.sacked_bytes); 3804 } 3805 3806 uint32_t 3807 tcp_compute_initwnd(uint32_t maxseg) 3808 { 3809 /* 3810 * Calculate the Initial Window, also used as Restart Window 3811 * 3812 * RFC5681 Section 3.1 specifies the default conservative values. 3813 * RFC3390 specifies slightly more aggressive values. 
3814 * RFC6928 increases it to ten segments. 3815 * A user-specified value, V_tcp_initcwnd_segments, takes precedence over these defaults when it is set. 3816 */ 3817 if (V_tcp_initcwnd_segments) 3818 return min(V_tcp_initcwnd_segments * maxseg, 3819 max(2 * maxseg, V_tcp_initcwnd_segments * 1460)); 3820 else if (V_tcp_do_rfc3390) 3821 return min(4 * maxseg, max(2 * maxseg, 4380)); 3822 else { 3823 /* Per RFC5681 Section 3.1 */ 3824 if (maxseg > 2190) 3825 return (2 * maxseg); 3826 else if (maxseg > 1095) 3827 return (3 * maxseg); 3828 else 3829 return (4 * maxseg); 3830 } 3831 } 3832