/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc6675_pipe), 0,
    "Use calculated pipe/in-flight bytes per RFC 6675");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 0;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
#define	V_tcp_insecure_syn	VNET(tcp_insecure_syn)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int, uint8_t,
		     int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static void inline	cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
			    uint16_t type);
static void inline	cc_conn_init(struct tcpcb *tp);
static void inline	cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void inline	hhook_run_tcp_est_in(struct tcpcb *tp,
			    struct tcphdr *th, struct tcpopt *to);

/*
 * TCP statistics are stored in an "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
VNET_PCPUSTAT_SYSINIT(tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(tcpstat);
#endif /* VIMAGE */
/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}

/*
 * Wrapper for the TCP established input helper hook.
 */
static void inline
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}

/*
 * CC wrapper hook functions
 */
static void inline
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tp->t_maxseg);
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}

static void inline
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * Support for user specified value for initial flight size.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
	else if (V_tcp_initcwnd_segments)
		tp->snd_cwnd = min(V_tcp_initcwnd_segments * tp->t_maxseg,
		    max(2 * tp->t_maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (tp->t_maxseg > 2190)
			tp->snd_cwnd = 2 * tp->t_maxseg;
		else if (tp->t_maxseg > 1095)
			tp->snd_cwnd = 3 * tp->t_maxseg;
		else
			tp->snd_cwnd = 4 * tp->t_maxseg;
	}
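
	/*
	 * Worked example of the branches above (illustrative numbers only):
	 * with t_maxseg = 1460 and the default tcp_initcwnd_segments of 10,
	 * the initial window is min(10 * 1460, max(2 * 1460, 10 * 1460)) =
	 * 14600 bytes; with only rfc3390 enabled it is
	 * min(4 * 1460, max(2 * 1460, 4380)) = 4380 bytes.
	 */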

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    tp->t_maxseg) * tp->t_maxseg;
		tp->snd_cwnd = tp->t_maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

static void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

#ifdef TCP_SIGNATURE
static inline int
tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
    struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
{
	int ret;

	tcp_fields_to_net(th);
	ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
	tcp_fields_to_host(th);
	return (ret);
}
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * the following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that the
 *	  segment size is not larger than the MSS.
 *	- Delayed acks are enabled or this is a half-synchronized T/TCP
 *	  connection.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxopd) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

static void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT1:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		if (tp->t_flags & TF_DELACK)
			tp->ccv->flags |= CCF_DELACK;
		else
			tp->ccv->flags &= ~CCF_DELACK;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW)
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
	}
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef TCP_SIGNATURE
	uint8_t sig_checked = 0;
#endif
	uint8_t iptos = 0;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#define	TI_UNLOCKED	1
#define	TI_RLOCKED	2

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */

		if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
			m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
			if (m == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(tlen + off0);
		}

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}
#endif /* INET */

#ifdef INET6
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	iptos = ip->ip_tos;
#endif

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are three cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, ACKs for a
	 * connection in TIMEWAIT and SYNs not targeting a listening socket.
	 */
	if ((thflags & (TH_FIN | TH_RST)) != 0) {
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
	} else
		ti_locked = TI_UNLOCKED;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    ((inp->inp_socket == NULL) ||
	    (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_RUNLOCK(&V_tcbinfo);
		return (IPPROTO_DONE);
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_FIN | TH_RST)) != 0)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
#endif
	if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
	      (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN)))) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));
		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {

			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
#ifdef TCP_SIGNATURE
			if (sig_checked == 0) {
				tcp_dooptions(&to, optp, optlen,
				    (thflags & TH_SYN) ? TO_SYN : 0);
				if (!tcp_signature_verify_input(m, off0, tlen,
				    optlen, &to, th, tp->t_flags)) {

					/*
					 * In SYN_SENT state if it receives an
					 * RST, it is allowed for further
					 * processing.
					 */
					if ((thflags & TH_RST) == 0 ||
					    (tp->t_state == TCPS_SYN_SENT) == 0)
						goto dropunlock;
				}
				sig_checked = 1;
			}
#endif

			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return (IPPROTO_DONE);
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		if (ti_locked == TI_RLOCKED) {
			INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;
		}
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition allowing a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}

#ifdef TCP_SIGNATURE
	if (sig_checked == 0) {
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
		    th, tp->t_flags)) {

			/*
			 * In SYN_SENT state if it receives an RST, it is
			 * allowed for further processing.
			 */
			if ((thflags & TH_RST) == 0 ||
			    (tp->t_state == TCPS_SYN_SENT) == 0)
				goto dropunlock;
		}
		sig_checked = 1;
	}
#endif

	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int rstreason, todrop, win;
	u_long tiwin;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;

	/*
	 * If this is either a state-changing packet or current state isn't
	 * established, we require a write lock on tcbinfo.  Otherwise, we
	 * allow the tcbinfo to be in either a locked or unlocked state, as
	 * the caller may have unnecessarily acquired a write lock due to a
	 * race.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		else {
			KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
			    "ti_locked: %d", __func__, ti_locked));
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		}
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
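
	/*
	 * For example, a raw window field of 65535 with a negotiated window
	 * scale of 7 yields tiwin = 65535 << 7 = 8388480 bytes.
	 */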

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
	}
	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK(&V_tcbinfo);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					u_int t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				acked = BYTES_THIS_ACK(tp, th);

				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);

				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				TCP_PROBE3(debug__input, tp, th,
				    mtod(m, const char *));
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (sbavail(&so->so_snd))
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_RLOCKED)
				INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCPSTAT_INC(tcps_rcvpack);
			TCPSTAT_ADD(tcps_rcvbyte, tlen);
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));

			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (e.g. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit it
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. Application has not set receive buffer size with
			 *     SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE.
			 *  2. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  3. received bytes per RTT is within seven eighths of the
			 *     current socket buffer size;
			 *  4. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
			if (V_tcp_do_autorcvbuf &&
			    (to.to_flags & TOF_TS) &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    V_tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    V_tcp_autorcvbuf_inc,
						    V_tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}
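
			/*
			 * For example, with a 65536-byte receive buffer the
			 * 7/8 threshold is 57344 bytes; if more than that
			 * arrived within one RTT, the buffer grows by
			 * recvbuf_inc (16K by default) up to recvbuf_max
			 * (2MB by default).
			 */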
received bytes per RTT is within seven eighth of the 1854 * current socket buffer size; 1855 * 4. receive buffer size has not hit maximal automatic size; 1856 * 1857 * This algorithm does one step per RTT at most and only if 1858 * we receive a bulk stream w/o packet losses or reorderings. 1859 * Shrinking the buffer during idle times is not necessary as 1860 * it doesn't consume any memory when idle. 1861 * 1862 * TODO: Only step up if the application is actually serving 1863 * the buffer to better manage the socket buffer resources. 1864 */ 1865 if (V_tcp_do_autorcvbuf && 1866 (to.to_flags & TOF_TS) && 1867 to.to_tsecr && 1868 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 1869 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) && 1870 to.to_tsecr - tp->rfbuf_ts < hz) { 1871 if (tp->rfbuf_cnt > 1872 (so->so_rcv.sb_hiwat / 8 * 7) && 1873 so->so_rcv.sb_hiwat < 1874 V_tcp_autorcvbuf_max) { 1875 newsize = 1876 min(so->so_rcv.sb_hiwat + 1877 V_tcp_autorcvbuf_inc, 1878 V_tcp_autorcvbuf_max); 1879 } 1880 /* Start over with next RTT. */ 1881 tp->rfbuf_ts = 0; 1882 tp->rfbuf_cnt = 0; 1883 } else 1884 tp->rfbuf_cnt += tlen; /* add up */ 1885 } 1886 1887 /* Add data to socket buffer. */ 1888 SOCKBUF_LOCK(&so->so_rcv); 1889 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1890 m_freem(m); 1891 } else { 1892 /* 1893 * Set new socket buffer size. 1894 * Give up when limit is reached. 1895 */ 1896 if (newsize) 1897 if (!sbreserve_locked(&so->so_rcv, 1898 newsize, so, NULL)) 1899 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1900 m_adj(m, drop_hdrlen); /* delayed header drop */ 1901 sbappendstream_locked(&so->so_rcv, m, 0); 1902 } 1903 /* NB: sorwakeup_locked() does an implicit unlock. */ 1904 sorwakeup_locked(so); 1905 if (DELAY_ACK(tp, tlen)) { 1906 tp->t_flags |= TF_DELACK; 1907 } else { 1908 tp->t_flags |= TF_ACKNOW; 1909 tcp_output(tp); 1910 } 1911 goto check_delack; 1912 } 1913 } 1914 1915 /* 1916 * Calculate amount of space in receive window, 1917 * and then do TCP input processing. 1918 * Receive window is amount of space in rcv queue, 1919 * but not less than advertised window. 1920 */ 1921 win = sbspace(&so->so_rcv); 1922 if (win < 0) 1923 win = 0; 1924 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1925 1926 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1927 tp->rfbuf_ts = 0; 1928 tp->rfbuf_cnt = 0; 1929 1930 switch (tp->t_state) { 1931 1932 /* 1933 * If the state is SYN_RECEIVED: 1934 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1935 */ 1936 case TCPS_SYN_RECEIVED: 1937 if ((thflags & TH_ACK) && 1938 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1939 SEQ_GT(th->th_ack, tp->snd_max))) { 1940 rstreason = BANDLIM_RST_OPENPORT; 1941 goto dropwithreset; 1942 } 1943 break; 1944 1945 /* 1946 * If the state is SYN_SENT: 1947 * if seg contains an ACK, but not for our SYN, drop the input. 1948 * if seg contains a RST, then drop the connection. 1949 * if seg does not contain SYN, then drop it. 1950 * Otherwise this is an acceptable SYN segment 1951 * initialize tp->rcv_nxt and tp->irs 1952 * if seg contains ack then advance tp->snd_una 1953 * if seg contains an ECE and ECN support is enabled, the stream 1954 * is ECN capable. 
1955 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1956 * arrange for segment to be acked (eventually) 1957 * continue processing rest of data/controls, beginning with URG 1958 */ 1959 case TCPS_SYN_SENT: 1960 if ((thflags & TH_ACK) && 1961 (SEQ_LEQ(th->th_ack, tp->iss) || 1962 SEQ_GT(th->th_ack, tp->snd_max))) { 1963 rstreason = BANDLIM_UNLIMITED; 1964 goto dropwithreset; 1965 } 1966 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1967 TCP_PROBE5(connect__refused, NULL, tp, 1968 mtod(m, const char *), tp, th); 1969 tp = tcp_drop(tp, ECONNREFUSED); 1970 } 1971 if (thflags & TH_RST) 1972 goto drop; 1973 if (!(thflags & TH_SYN)) 1974 goto drop; 1975 1976 tp->irs = th->th_seq; 1977 tcp_rcvseqinit(tp); 1978 if (thflags & TH_ACK) { 1979 TCPSTAT_INC(tcps_connects); 1980 soisconnected(so); 1981 #ifdef MAC 1982 mac_socketpeer_set_from_mbuf(m, so); 1983 #endif 1984 /* Do window scaling on this connection? */ 1985 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1986 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1987 tp->rcv_scale = tp->request_r_scale; 1988 } 1989 tp->rcv_adv += imin(tp->rcv_wnd, 1990 TCP_MAXWIN << tp->rcv_scale); 1991 tp->snd_una++; /* SYN is acked */ 1992 /* 1993 * If there's data, delay ACK; if there's also a FIN 1994 * ACKNOW will be turned on later. 1995 */ 1996 if (DELAY_ACK(tp, tlen) && tlen != 0) 1997 tcp_timer_activate(tp, TT_DELACK, 1998 tcp_delacktime); 1999 else 2000 tp->t_flags |= TF_ACKNOW; 2001 2002 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 2003 tp->t_flags |= TF_ECN_PERMIT; 2004 TCPSTAT_INC(tcps_ecn_shs); 2005 } 2006 2007 /* 2008 * Received <SYN,ACK> in SYN_SENT[*] state. 2009 * Transitions: 2010 * SYN_SENT --> ESTABLISHED 2011 * SYN_SENT* --> FIN_WAIT_1 2012 */ 2013 tp->t_starttime = ticks; 2014 if (tp->t_flags & TF_NEEDFIN) { 2015 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2016 tp->t_flags &= ~TF_NEEDFIN; 2017 thflags &= ~TH_SYN; 2018 } else { 2019 tcp_state_change(tp, TCPS_ESTABLISHED); 2020 TCP_PROBE5(connect__established, NULL, tp, 2021 mtod(m, const char *), tp, th); 2022 cc_conn_init(tp); 2023 tcp_timer_activate(tp, TT_KEEP, 2024 TP_KEEPIDLE(tp)); 2025 } 2026 } else { 2027 /* 2028 * Received initial SYN in SYN-SENT[*] state => 2029 * simultaneous open. 2030 * If it succeeds, connection is * half-synchronized. 2031 * Otherwise, do 3-way handshake: 2032 * SYN-SENT -> SYN-RECEIVED 2033 * SYN-SENT* -> SYN-RECEIVED* 2034 */ 2035 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 2036 tcp_timer_activate(tp, TT_REXMT, 0); 2037 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2038 } 2039 2040 KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: " 2041 "ti_locked %d", __func__, ti_locked)); 2042 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2043 INP_WLOCK_ASSERT(tp->t_inpcb); 2044 2045 /* 2046 * Advance th->th_seq to correspond to first data byte. 2047 * If data, trim to stay within window, 2048 * dropping FIN if necessary. 2049 */ 2050 th->th_seq++; 2051 if (tlen > tp->rcv_wnd) { 2052 todrop = tlen - tp->rcv_wnd; 2053 m_adj(m, -todrop); 2054 tlen = tp->rcv_wnd; 2055 thflags &= ~TH_FIN; 2056 TCPSTAT_INC(tcps_rcvpackafterwin); 2057 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2058 } 2059 tp->snd_wl1 = th->th_seq - 1; 2060 tp->rcv_up = th->th_seq; 2061 /* 2062 * Client side of transaction: already sent SYN and data. 2063 * If the remote host used T/TCP to validate the SYN, 2064 * our data will be ACK'd; if so, enter normal data segment 2065 * processing in the middle of step 5, ack processing. 2066 * Otherwise, goto step 6. 
*/ 2068 if (thflags & TH_ACK) 2069 goto process_ACK; 2070 2071 goto step6; 2072 2073 /* 2074 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2075 * do normal processing. 2076 * 2077 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2078 */ 2079 case TCPS_LAST_ACK: 2080 case TCPS_CLOSING: 2081 break; /* continue normal processing */ 2082 } 2083 2084 /* 2085 * States other than LISTEN or SYN_SENT. 2086 * First check the RST flag and sequence number since reset segments 2087 * are exempt from the timestamp and connection count tests. This 2088 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2089 * below which allowed reset segments in half the sequence space 2090 * to fall through and be processed (which gives forged reset 2091 * segments with a random sequence number a 50 percent chance of 2092 * killing a connection). 2093 * Then check timestamp, if present. 2094 * Then check the connection count, if present. 2095 * Then check that at least some bytes of segment are within 2096 * receive window. If segment begins before rcv_nxt, 2097 * drop leading data (and SYN); if nothing left, just ack. 2098 */ 2099 if (thflags & TH_RST) { 2100 /* 2101 * RFC5961 Section 3.2 2102 * 2103 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2104 * - If RST is in window, we send challenge ACK. 2105 * 2106 * Note: to take into account delayed ACKs, we should 2107 * test against last_ack_sent instead of rcv_nxt. 2108 * Note 2: we handle special case of closed window, not 2109 * covered by the RFC. 2110 */ 2111 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2112 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2113 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2114 2115 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2116 KASSERT(ti_locked == TI_RLOCKED, 2117 ("%s: TH_RST ti_locked %d, th %p tp %p", 2118 __func__, ti_locked, th, tp)); 2119 KASSERT(tp->t_state != TCPS_SYN_SENT, 2120 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2121 __func__, th, tp)); 2122 2123 if (V_tcp_insecure_rst || 2124 tp->last_ack_sent == th->th_seq) { 2125 TCPSTAT_INC(tcps_drops); 2126 /* Drop the connection. */ 2127 switch (tp->t_state) { 2128 case TCPS_SYN_RECEIVED: 2129 so->so_error = ECONNREFUSED; 2130 goto close; 2131 case TCPS_ESTABLISHED: 2132 case TCPS_FIN_WAIT_1: 2133 case TCPS_FIN_WAIT_2: 2134 case TCPS_CLOSE_WAIT: 2135 so->so_error = ECONNRESET; 2136 close: 2137 tcp_state_change(tp, TCPS_CLOSED); 2138 /* FALLTHROUGH */ 2139 default: 2140 tp = tcp_close(tp); 2141 } 2142 } else { 2143 TCPSTAT_INC(tcps_badrst); 2144 /* Send challenge ACK. */ 2145 tcp_respond(tp, mtod(m, void *), th, m, 2146 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2147 tp->last_ack_sent = tp->rcv_nxt; 2148 m = NULL; 2149 } 2150 } 2151 goto drop; 2152 } 2153 2154 /* 2155 * RFC5961 Section 4.2 2156 * Send challenge ACK for any SYN in synchronized state. 2157 */ 2158 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT) { 2159 KASSERT(ti_locked == TI_RLOCKED, 2160 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked)); 2161 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2162 2163 TCPSTAT_INC(tcps_badsyn); 2164 if (V_tcp_insecure_syn && 2165 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2166 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2167 tp = tcp_drop(tp, ECONNRESET); 2168 rstreason = BANDLIM_UNLIMITED; 2169 } else { 2170 /* Send challenge ACK.
*/ 2171 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2172 tp->snd_nxt, TH_ACK); 2173 tp->last_ack_sent = tp->rcv_nxt; 2174 m = NULL; 2175 } 2176 goto drop; 2177 } 2178 2179 /* 2180 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2181 * and it's less than ts_recent, drop it. 2182 */ 2183 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2184 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2185 2186 /* Check to see if ts_recent is over 24 days old. */ 2187 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2188 /* 2189 * Invalidate ts_recent. If this segment updates 2190 * ts_recent, the age will be reset later and ts_recent 2191 * will get a valid value. If it does not, setting 2192 * ts_recent to zero will at least satisfy the 2193 * requirement that zero be placed in the timestamp 2194 * echo reply when ts_recent isn't valid. The 2195 * age isn't reset until we get a valid ts_recent 2196 * because we don't want out-of-order segments to be 2197 * dropped when ts_recent is old. 2198 */ 2199 tp->ts_recent = 0; 2200 } else { 2201 TCPSTAT_INC(tcps_rcvduppack); 2202 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2203 TCPSTAT_INC(tcps_pawsdrop); 2204 if (tlen) 2205 goto dropafterack; 2206 goto drop; 2207 } 2208 } 2209 2210 /* 2211 * In the SYN-RECEIVED state, validate that the packet belongs to 2212 * this connection before trimming the data to fit the receive 2213 * window. Check the sequence number versus IRS since we know 2214 * the sequence numbers haven't wrapped. This is a partial fix 2215 * for the "LAND" DoS attack. 2216 */ 2217 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2218 rstreason = BANDLIM_RST_OPENPORT; 2219 goto dropwithreset; 2220 } 2221 2222 todrop = tp->rcv_nxt - th->th_seq; 2223 if (todrop > 0) { 2224 if (thflags & TH_SYN) { 2225 thflags &= ~TH_SYN; 2226 th->th_seq++; 2227 if (th->th_urp > 1) 2228 th->th_urp--; 2229 else 2230 thflags &= ~TH_URG; 2231 todrop--; 2232 } 2233 /* 2234 * Following if statement from Stevens, vol. 2, p. 960. 2235 */ 2236 if (todrop > tlen 2237 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2238 /* 2239 * Any valid FIN must be to the left of the window. 2240 * At this point the FIN must be a duplicate or out 2241 * of sequence; drop it. 2242 */ 2243 thflags &= ~TH_FIN; 2244 2245 /* 2246 * Send an ACK to resynchronize and drop any data. 2247 * But keep on processing for RST or ACK. 2248 */ 2249 tp->t_flags |= TF_ACKNOW; 2250 todrop = tlen; 2251 TCPSTAT_INC(tcps_rcvduppack); 2252 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2253 } else { 2254 TCPSTAT_INC(tcps_rcvpartduppack); 2255 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2256 } 2257 drop_hdrlen += todrop; /* drop from the top afterwards */ 2258 th->th_seq += todrop; 2259 tlen -= todrop; 2260 if (th->th_urp > todrop) 2261 th->th_urp -= todrop; 2262 else { 2263 thflags &= ~TH_URG; 2264 th->th_urp = 0; 2265 } 2266 } 2267 2268 /* 2269 * If new data are received on a connection after the 2270 * user processes are gone, then RST the other end. 
2271 */ 2272 if ((so->so_state & SS_NOFDREF) && 2273 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2274 KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && " 2275 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked)); 2276 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2277 2278 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2279 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2280 "after socket was closed, " 2281 "sending RST and removing tcpcb\n", 2282 s, __func__, tcpstates[tp->t_state], tlen); 2283 free(s, M_TCPLOG); 2284 } 2285 tp = tcp_close(tp); 2286 TCPSTAT_INC(tcps_rcvafterclose); 2287 rstreason = BANDLIM_UNLIMITED; 2288 goto dropwithreset; 2289 } 2290 2291 /* 2292 * If segment ends after window, drop trailing data 2293 * (and PUSH and FIN); if nothing left, just ACK. 2294 */ 2295 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2296 if (todrop > 0) { 2297 TCPSTAT_INC(tcps_rcvpackafterwin); 2298 if (todrop >= tlen) { 2299 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2300 /* 2301 * If window is closed can only take segments at 2302 * window edge, and have to drop data and PUSH from 2303 * incoming segments. Continue processing, but 2304 * remember to ack. Otherwise, drop segment 2305 * and ack. 2306 */ 2307 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2308 tp->t_flags |= TF_ACKNOW; 2309 TCPSTAT_INC(tcps_rcvwinprobe); 2310 } else 2311 goto dropafterack; 2312 } else 2313 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2314 m_adj(m, -todrop); 2315 tlen -= todrop; 2316 thflags &= ~(TH_PUSH|TH_FIN); 2317 } 2318 2319 /* 2320 * If last ACK falls within this segment's sequence numbers, 2321 * record its timestamp. 2322 * NOTE: 2323 * 1) That the test incorporates suggestions from the latest 2324 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2325 * 2) That updating only on newer timestamps interferes with 2326 * our earlier PAWS tests, so this check should be solely 2327 * predicated on the sequence space of this segment. 2328 * 3) That we modify the segment boundary check to be 2329 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2330 * instead of RFC1323's 2331 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2332 * This modified check allows us to overcome RFC1323's 2333 * limitations as described in Stevens TCP/IP Illustrated 2334 * Vol. 2 p.869. In such cases, we can still calculate the 2335 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2336 */ 2337 if ((to.to_flags & TOF_TS) != 0 && 2338 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2339 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2340 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2341 tp->ts_recent_age = tcp_ts_getticks(); 2342 tp->ts_recent = to.to_tsval; 2343 } 2344 2345 /* 2346 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2347 * flag is on (half-synchronized state), then queue data for 2348 * later processing; else drop segment and return. 2349 */ 2350 if ((thflags & TH_ACK) == 0) { 2351 if (tp->t_state == TCPS_SYN_RECEIVED || 2352 (tp->t_flags & TF_NEEDSYN)) 2353 goto step6; 2354 else if (tp->t_flags & TF_ACKNOW) 2355 goto dropafterack; 2356 else 2357 goto drop; 2358 } 2359 2360 /* 2361 * Ack processing. 2362 */ 2363 switch (tp->t_state) { 2364 2365 /* 2366 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2367 * ESTABLISHED state and continue processing. 2368 * The ACK was checked above. 2369 */ 2370 case TCPS_SYN_RECEIVED: 2371 2372 TCPSTAT_INC(tcps_connects); 2373 soisconnected(so); 2374 /* Do window scaling? 
*/ 2375 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2376 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2377 tp->rcv_scale = tp->request_r_scale; 2378 tp->snd_wnd = tiwin; 2379 } 2380 /* 2381 * Make transitions: 2382 * SYN-RECEIVED -> ESTABLISHED 2383 * SYN-RECEIVED* -> FIN-WAIT-1 2384 */ 2385 tp->t_starttime = ticks; 2386 if (tp->t_flags & TF_NEEDFIN) { 2387 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2388 tp->t_flags &= ~TF_NEEDFIN; 2389 } else { 2390 tcp_state_change(tp, TCPS_ESTABLISHED); 2391 TCP_PROBE5(accept__established, NULL, tp, 2392 mtod(m, const char *), tp, th); 2393 cc_conn_init(tp); 2394 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2395 } 2396 /* 2397 * If segment contains data or ACK, will call tcp_reass() 2398 * later; if not, do so now to pass queued data to user. 2399 */ 2400 if (tlen == 0 && (thflags & TH_FIN) == 0) 2401 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 2402 (struct mbuf *)0); 2403 tp->snd_wl1 = th->th_seq - 1; 2404 /* FALLTHROUGH */ 2405 2406 /* 2407 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2408 * ACKs. If the ack is in the range 2409 * tp->snd_una < th->th_ack <= tp->snd_max 2410 * then advance tp->snd_una to th->th_ack and drop 2411 * data from the retransmission queue. If this ACK reflects 2412 * more up-to-date window information, we update our window information. 2413 */ 2414 case TCPS_ESTABLISHED: 2415 case TCPS_FIN_WAIT_1: 2416 case TCPS_FIN_WAIT_2: 2417 case TCPS_CLOSE_WAIT: 2418 case TCPS_CLOSING: 2419 case TCPS_LAST_ACK: 2420 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2421 TCPSTAT_INC(tcps_rcvacktoomuch); 2422 goto dropafterack; 2423 } 2424 if ((tp->t_flags & TF_SACK_PERMIT) && 2425 ((to.to_flags & TOF_SACK) || 2426 !TAILQ_EMPTY(&tp->snd_holes))) 2427 tcp_sack_doack(tp, &to, th->th_ack); 2428 else 2429 /* 2430 * Reset the value so that previous (valid) value 2431 * from the last ack with SACK doesn't get used. 2432 */ 2433 tp->sackhint.sacked_bytes = 0; 2434 2435 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2436 hhook_run_tcp_est_in(tp, th, &to); 2437 2438 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2439 if (tlen == 0 && tiwin == tp->snd_wnd) { 2440 /* 2441 * If this is the first time we've seen a 2442 * FIN from the remote, this is not a 2443 * duplicate and it needs to be processed 2444 * normally. This happens during a 2445 * simultaneous close. 2446 */ 2447 if ((thflags & TH_FIN) && 2448 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2449 tp->t_dupacks = 0; 2450 break; 2451 } 2452 TCPSTAT_INC(tcps_rcvdupack); 2453 /* 2454 * If we have outstanding data (other than 2455 * a window probe), this is a completely 2456 * duplicate ack (i.e., window info didn't 2457 * change and FIN isn't set), 2458 * the ack is the biggest we've 2459 * seen and we've seen exactly our rexmt 2460 * threshold of them, assume a packet 2461 * has been dropped and retransmit it. 2462 * Kludge snd_nxt & the congestion 2463 * window so we send only this one 2464 * packet. 2465 * 2466 * We know we're losing at the current 2467 * window size so do congestion avoidance 2468 * (set ssthresh to half the current window 2469 * and pull our congestion window back to 2470 * the new ssthresh). 2471 * 2472 * Dup acks mean that packets have left the 2473 * network (they're now cached at the receiver) 2474 * so bump cwnd by the amount in the receiver 2475 * to keep a constant cwnd packets in the 2476 * network. 2477 * 2478 * When using TCP ECN, notify the peer that 2479 * we reduced the cwnd.
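 * (Worked illustration, with made-up numbers and the default NewReno
 * behaviour: if snd_cwnd is 10 * maxseg when the third duplicate ACK
 * arrives, ssthresh is pulled down to about 5 * maxseg, snd_cwnd is
 * collapsed to one maxseg so that only the presumed-lost segment is
 * retransmitted, and each further duplicate ACK then inflates snd_cwnd
 * by one maxseg until new data may be sent again.)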
2480 */ 2481 if (!tcp_timer_active(tp, TT_REXMT) || 2482 th->th_ack != tp->snd_una) 2483 tp->t_dupacks = 0; 2484 else if (++tp->t_dupacks > tcprexmtthresh || 2485 IN_FASTRECOVERY(tp->t_flags)) { 2486 cc_ack_received(tp, th, CC_DUPACK); 2487 if ((tp->t_flags & TF_SACK_PERMIT) && 2488 IN_FASTRECOVERY(tp->t_flags)) { 2489 int awnd; 2490 2491 /* 2492 * Compute the amount of data in flight first. 2493 * We can inject new data into the pipe iff 2494 * we have less than 1/2 the original window's 2495 * worth of data in flight. 2496 */ 2497 if (V_tcp_do_rfc6675_pipe) 2498 awnd = tcp_compute_pipe(tp); 2499 else 2500 awnd = (tp->snd_nxt - tp->snd_fack) + 2501 tp->sackhint.sack_bytes_rexmit; 2502 2503 if (awnd < tp->snd_ssthresh) { 2504 tp->snd_cwnd += tp->t_maxseg; 2505 if (tp->snd_cwnd > tp->snd_ssthresh) 2506 tp->snd_cwnd = tp->snd_ssthresh; 2507 } 2508 } else 2509 tp->snd_cwnd += tp->t_maxseg; 2510 (void) tcp_output(tp); 2511 goto drop; 2512 } else if (tp->t_dupacks == tcprexmtthresh) { 2513 tcp_seq onxt = tp->snd_nxt; 2514 2515 /* 2516 * If we're doing sack, check to 2517 * see if we're already in sack 2518 * recovery. If we're not doing sack, 2519 * check to see if we're in newreno 2520 * recovery. 2521 */ 2522 if (tp->t_flags & TF_SACK_PERMIT) { 2523 if (IN_FASTRECOVERY(tp->t_flags)) { 2524 tp->t_dupacks = 0; 2525 break; 2526 } 2527 } else { 2528 if (SEQ_LEQ(th->th_ack, 2529 tp->snd_recover)) { 2530 tp->t_dupacks = 0; 2531 break; 2532 } 2533 } 2534 /* Congestion signal before ack. */ 2535 cc_cong_signal(tp, th, CC_NDUPACK); 2536 cc_ack_received(tp, th, CC_DUPACK); 2537 tcp_timer_activate(tp, TT_REXMT, 0); 2538 tp->t_rtttime = 0; 2539 if (tp->t_flags & TF_SACK_PERMIT) { 2540 TCPSTAT_INC( 2541 tcps_sack_recovery_episode); 2542 tp->sack_newdata = tp->snd_nxt; 2543 tp->snd_cwnd = tp->t_maxseg; 2544 (void) tcp_output(tp); 2545 goto drop; 2546 } 2547 tp->snd_nxt = th->th_ack; 2548 tp->snd_cwnd = tp->t_maxseg; 2549 (void) tcp_output(tp); 2550 KASSERT(tp->snd_limited <= 2, 2551 ("%s: tp->snd_limited too big", 2552 __func__)); 2553 tp->snd_cwnd = tp->snd_ssthresh + 2554 tp->t_maxseg * 2555 (tp->t_dupacks - tp->snd_limited); 2556 if (SEQ_GT(onxt, tp->snd_nxt)) 2557 tp->snd_nxt = onxt; 2558 goto drop; 2559 } else if (V_tcp_do_rfc3042) { 2560 /* 2561 * Process first and second duplicate 2562 * ACKs. Each indicates a segment 2563 * leaving the network, creating room 2564 * for more. Make sure we can send a 2565 * packet on reception of each duplicate 2566 * ACK by increasing snd_cwnd by one 2567 * segment. Restore the original 2568 * snd_cwnd after packet transmission. 2569 */ 2570 cc_ack_received(tp, th, CC_DUPACK); 2571 u_long oldcwnd = tp->snd_cwnd; 2572 tcp_seq oldsndmax = tp->snd_max; 2573 u_int sent; 2574 int avail; 2575 2576 KASSERT(tp->t_dupacks == 1 || 2577 tp->t_dupacks == 2, 2578 ("%s: dupacks not 1 or 2", 2579 __func__)); 2580 if (tp->t_dupacks == 1) 2581 tp->snd_limited = 0; 2582 tp->snd_cwnd = 2583 (tp->snd_nxt - tp->snd_una) + 2584 (tp->t_dupacks - tp->snd_limited) * 2585 tp->t_maxseg; 2586 /* 2587 * Only call tcp_output when there 2588 * is new data available to be sent. 2589 * Otherwise we would send pure ACKs. 
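 * (Illustrative example: on the first duplicate ACK with, say,
 * 4 * maxseg of data outstanding, snd_cwnd is set to
 * (snd_nxt - snd_una) + 1 * maxseg, so at most one previously unsent
 * segment can go out; the original snd_cwnd is restored afterwards.)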
2590 */ 2591 SOCKBUF_LOCK(&so->so_snd); 2592 avail = sbavail(&so->so_snd) - 2593 (tp->snd_nxt - tp->snd_una); 2594 SOCKBUF_UNLOCK(&so->so_snd); 2595 if (avail > 0) 2596 (void) tcp_output(tp); 2597 sent = tp->snd_max - oldsndmax; 2598 if (sent > tp->t_maxseg) { 2599 KASSERT((tp->t_dupacks == 2 && 2600 tp->snd_limited == 0) || 2601 (sent == tp->t_maxseg + 1 && 2602 tp->t_flags & TF_SENTFIN), 2603 ("%s: sent too much", 2604 __func__)); 2605 tp->snd_limited = 2; 2606 } else if (sent > 0) 2607 ++tp->snd_limited; 2608 tp->snd_cwnd = oldcwnd; 2609 goto drop; 2610 } 2611 } else 2612 tp->t_dupacks = 0; 2613 break; 2614 } 2615 2616 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2617 ("%s: th_ack <= snd_una", __func__)); 2618 2619 /* 2620 * If the congestion window was inflated to account 2621 * for the other side's cached packets, retract it. 2622 */ 2623 if (IN_FASTRECOVERY(tp->t_flags)) { 2624 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2625 if (tp->t_flags & TF_SACK_PERMIT) 2626 tcp_sack_partialack(tp, th); 2627 else 2628 tcp_newreno_partial_ack(tp, th); 2629 } else 2630 cc_post_recovery(tp, th); 2631 } 2632 tp->t_dupacks = 0; 2633 /* 2634 * If we reach this point, ACK is not a duplicate, 2635 * i.e., it ACKs something we sent. 2636 */ 2637 if (tp->t_flags & TF_NEEDSYN) { 2638 /* 2639 * T/TCP: Connection was half-synchronized, and our 2640 * SYN has been ACK'd (so connection is now fully 2641 * synchronized). Go to non-starred state, 2642 * increment snd_una for ACK of SYN, and check if 2643 * we can do window scaling. 2644 */ 2645 tp->t_flags &= ~TF_NEEDSYN; 2646 tp->snd_una++; 2647 /* Do window scaling? */ 2648 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2649 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2650 tp->rcv_scale = tp->request_r_scale; 2651 /* Send window already scaled. */ 2652 } 2653 } 2654 2655 process_ACK: 2656 INP_WLOCK_ASSERT(tp->t_inpcb); 2657 2658 acked = BYTES_THIS_ACK(tp, th); 2659 TCPSTAT_INC(tcps_rcvackpack); 2660 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2661 2662 /* 2663 * If we just performed our first retransmit, and the ACK 2664 * arrives within our recovery window, then it was a mistake 2665 * to do the retransmit in the first place. Recover our 2666 * original cwnd and ssthresh, and proceed to transmit where 2667 * we left off. 2668 */ 2669 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID && 2670 (int)(ticks - tp->t_badrxtwin) < 0) 2671 cc_cong_signal(tp, th, CC_RTO_ERR); 2672 2673 /* 2674 * If we have a timestamp reply, update smoothed 2675 * round trip time. If no timestamp is present but 2676 * transmit timer is running and timed sequence 2677 * number was acked, update smoothed round trip time. 2678 * Since we now have an rtt measurement, cancel the 2679 * timer backoff (cf., Phil Karn's retransmit alg.). 2680 * Recompute the initial retransmit timer. 2681 * 2682 * Some boxes send broken timestamp replies 2683 * during the SYN+ACK phase, ignore 2684 * timestamps of 0 or we could calculate a 2685 * huge RTT and blow up the retransmit timer. 
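 * (Example with made-up values: if tcp_ts_getticks() currently returns
 * 105000 and the segment echoes a tsecr of 104960, t is 40 and the RTT
 * sample handed to tcp_xmit_timer() is TCP_TS_TO_TICKS(40) + 1 ticks.)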
2686 */ 2687 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2688 u_int t; 2689 2690 t = tcp_ts_getticks() - to.to_tsecr; 2691 if (!tp->t_rttlow || tp->t_rttlow > t) 2692 tp->t_rttlow = t; 2693 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2694 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2695 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2696 tp->t_rttlow = ticks - tp->t_rtttime; 2697 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2698 } 2699 2700 /* 2701 * If all outstanding data is acked, stop retransmit 2702 * timer and remember to restart (more output or persist). 2703 * If there is more data to be acked, restart retransmit 2704 * timer, using current (possibly backed-off) value. 2705 */ 2706 if (th->th_ack == tp->snd_max) { 2707 tcp_timer_activate(tp, TT_REXMT, 0); 2708 needoutput = 1; 2709 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2710 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2711 2712 /* 2713 * If no data (only SYN) was ACK'd, 2714 * skip rest of ACK processing. 2715 */ 2716 if (acked == 0) 2717 goto step6; 2718 2719 /* 2720 * Let the congestion control algorithm update congestion 2721 * control related information. This typically means increasing 2722 * the congestion window. 2723 */ 2724 cc_ack_received(tp, th, CC_ACK); 2725 2726 SOCKBUF_LOCK(&so->so_snd); 2727 if (acked > sbavail(&so->so_snd)) { 2728 tp->snd_wnd -= sbavail(&so->so_snd); 2729 mfree = sbcut_locked(&so->so_snd, 2730 (int)sbavail(&so->so_snd)); 2731 ourfinisacked = 1; 2732 } else { 2733 mfree = sbcut_locked(&so->so_snd, acked); 2734 tp->snd_wnd -= acked; 2735 ourfinisacked = 0; 2736 } 2737 /* NB: sowwakeup_locked() does an implicit unlock. */ 2738 sowwakeup_locked(so); 2739 m_freem(mfree); 2740 /* Detect una wraparound. */ 2741 if (!IN_RECOVERY(tp->t_flags) && 2742 SEQ_GT(tp->snd_una, tp->snd_recover) && 2743 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2744 tp->snd_recover = th->th_ack - 1; 2745 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2746 if (IN_RECOVERY(tp->t_flags) && 2747 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2748 EXIT_RECOVERY(tp->t_flags); 2749 } 2750 tp->snd_una = th->th_ack; 2751 if (tp->t_flags & TF_SACK_PERMIT) { 2752 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2753 tp->snd_recover = tp->snd_una; 2754 } 2755 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2756 tp->snd_nxt = tp->snd_una; 2757 2758 switch (tp->t_state) { 2759 2760 /* 2761 * In FIN_WAIT_1 STATE in addition to the processing 2762 * for the ESTABLISHED state if our FIN is now acknowledged 2763 * then enter FIN_WAIT_2. 2764 */ 2765 case TCPS_FIN_WAIT_1: 2766 if (ourfinisacked) { 2767 /* 2768 * If we can't receive any more 2769 * data, then closing user can proceed. 2770 * Starting the timer is contrary to the 2771 * specification, but if we don't get a FIN 2772 * we'll hang forever. 2773 * 2774 * XXXjl: 2775 * we should release the tp also, and use a 2776 * compressed state. 2777 */ 2778 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2779 soisdisconnected(so); 2780 tcp_timer_activate(tp, TT_2MSL, 2781 (tcp_fast_finwait2_recycle ? 2782 tcp_finwait2_timeout : 2783 TP_MAXIDLE(tp))); 2784 } 2785 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2786 } 2787 break; 2788 2789 /* 2790 * In CLOSING STATE in addition to the processing for 2791 * the ESTABLISHED state if the ACK acknowledges our FIN 2792 * then enter the TIME-WAIT state, otherwise ignore 2793 * the segment. 
2794 */ 2795 case TCPS_CLOSING: 2796 if (ourfinisacked) { 2797 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2798 tcp_twstart(tp); 2799 INP_INFO_RUNLOCK(&V_tcbinfo); 2800 m_freem(m); 2801 return; 2802 } 2803 break; 2804 2805 /* 2806 * In LAST_ACK, we may still be waiting for data to drain 2807 * and/or to be acked, as well as for the ack of our FIN. 2808 * If our FIN is now acknowledged, delete the TCB, 2809 * enter the closed state and return. 2810 */ 2811 case TCPS_LAST_ACK: 2812 if (ourfinisacked) { 2813 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2814 tp = tcp_close(tp); 2815 goto drop; 2816 } 2817 break; 2818 } 2819 } 2820 2821 step6: 2822 INP_WLOCK_ASSERT(tp->t_inpcb); 2823 2824 /* 2825 * Update window information. 2826 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2827 */ 2828 if ((thflags & TH_ACK) && 2829 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2830 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2831 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2832 /* keep track of pure window updates */ 2833 if (tlen == 0 && 2834 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2835 TCPSTAT_INC(tcps_rcvwinupd); 2836 tp->snd_wnd = tiwin; 2837 tp->snd_wl1 = th->th_seq; 2838 tp->snd_wl2 = th->th_ack; 2839 if (tp->snd_wnd > tp->max_sndwnd) 2840 tp->max_sndwnd = tp->snd_wnd; 2841 needoutput = 1; 2842 } 2843 2844 /* 2845 * Process segments with URG. 2846 */ 2847 if ((thflags & TH_URG) && th->th_urp && 2848 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2849 /* 2850 * This is a kludge, but if we receive and accept 2851 * random urgent pointers, we'll crash in 2852 * soreceive. It's hard to imagine someone 2853 * actually wanting to send this much urgent data. 2854 */ 2855 SOCKBUF_LOCK(&so->so_rcv); 2856 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 2857 th->th_urp = 0; /* XXX */ 2858 thflags &= ~TH_URG; /* XXX */ 2859 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2860 goto dodata; /* XXX */ 2861 } 2862 /* 2863 * If this segment advances the known urgent pointer, 2864 * then mark the data stream. This should not happen 2865 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2866 * a FIN has been received from the remote side. 2867 * In these states we ignore the URG. 2868 * 2869 * According to RFC961 (Assigned Protocols), 2870 * the urgent pointer points to the last octet 2871 * of urgent data. We continue, however, 2872 * to consider it to indicate the first octet 2873 * of data past the urgent section as the original 2874 * spec states (in one of two places). 2875 */ 2876 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2877 tp->rcv_up = th->th_seq + th->th_urp; 2878 so->so_oobmark = sbavail(&so->so_rcv) + 2879 (tp->rcv_up - tp->rcv_nxt) - 1; 2880 if (so->so_oobmark == 0) 2881 so->so_rcv.sb_state |= SBS_RCVATMARK; 2882 sohasoutofband(so); 2883 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2884 } 2885 SOCKBUF_UNLOCK(&so->so_rcv); 2886 /* 2887 * Remove out of band data so doesn't get presented to user. 2888 * This can happen independent of advancing the URG pointer, 2889 * but if two URG's are pending at once, some out-of-band 2890 * data may creep in... ick. 2891 */ 2892 if (th->th_urp <= (u_long)tlen && 2893 !(so->so_options & SO_OOBINLINE)) { 2894 /* hdr drop is delayed */ 2895 tcp_pulloutofband(so, th, m, drop_hdrlen); 2896 } 2897 } else { 2898 /* 2899 * If no out of band data is expected, 2900 * pull receive urgent pointer along 2901 * with the receive window. 
2902 */ 2903 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2904 tp->rcv_up = tp->rcv_nxt; 2905 } 2906 dodata: /* XXX */ 2907 INP_WLOCK_ASSERT(tp->t_inpcb); 2908 2909 /* 2910 * Process the segment text, merging it into the TCP sequencing queue, 2911 * and arranging for acknowledgment of receipt if necessary. 2912 * This process logically involves adjusting tp->rcv_wnd as data 2913 * is presented to the user (this happens in tcp_usrreq.c, 2914 * case PRU_RCVD). If a FIN has already been received on this 2915 * connection then we just ignore the text. 2916 */ 2917 if ((tlen || (thflags & TH_FIN)) && 2918 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2919 tcp_seq save_start = th->th_seq; 2920 m_adj(m, drop_hdrlen); /* delayed header drop */ 2921 /* 2922 * Insert segment which includes th into TCP reassembly queue 2923 * with control block tp. Set thflags to whether reassembly now 2924 * includes a segment with FIN. This handles the common case 2925 * inline (segment is the next to be received on an established 2926 * connection, and the queue is empty), avoiding linkage into 2927 * and removal from the queue and repetition of various 2928 * conversions. 2929 * Set DELACK for segments received in order, but ack 2930 * immediately when segments are out of order (so 2931 * fast retransmit can work). 2932 */ 2933 if (th->th_seq == tp->rcv_nxt && 2934 LIST_EMPTY(&tp->t_segq) && 2935 TCPS_HAVEESTABLISHED(tp->t_state)) { 2936 if (DELAY_ACK(tp, tlen)) 2937 tp->t_flags |= TF_DELACK; 2938 else 2939 tp->t_flags |= TF_ACKNOW; 2940 tp->rcv_nxt += tlen; 2941 thflags = th->th_flags & TH_FIN; 2942 TCPSTAT_INC(tcps_rcvpack); 2943 TCPSTAT_ADD(tcps_rcvbyte, tlen); 2944 SOCKBUF_LOCK(&so->so_rcv); 2945 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2946 m_freem(m); 2947 else 2948 sbappendstream_locked(&so->so_rcv, m, 0); 2949 /* NB: sorwakeup_locked() does an implicit unlock. */ 2950 sorwakeup_locked(so); 2951 } else { 2952 /* 2953 * XXX: Due to the header drop above "th" is 2954 * theoretically invalid by now. Fortunately 2955 * m_adj() doesn't actually frees any mbufs 2956 * when trimming from the head. 2957 */ 2958 thflags = tcp_reass(tp, th, &tlen, m); 2959 tp->t_flags |= TF_ACKNOW; 2960 } 2961 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2962 tcp_update_sack_list(tp, save_start, save_start + tlen); 2963 #if 0 2964 /* 2965 * Note the amount of data that peer has sent into 2966 * our window, in order to estimate the sender's 2967 * buffer size. 2968 * XXX: Unused. 2969 */ 2970 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 2971 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2972 else 2973 len = so->so_rcv.sb_hiwat; 2974 #endif 2975 } else { 2976 m_freem(m); 2977 thflags &= ~TH_FIN; 2978 } 2979 2980 /* 2981 * If FIN is received ACK the FIN and let the user know 2982 * that the connection is closing. 2983 */ 2984 if (thflags & TH_FIN) { 2985 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2986 socantrcvmore(so); 2987 /* 2988 * If connection is half-synchronized 2989 * (ie NEEDSYN flag on) then delay ACK, 2990 * so it may be piggybacked when SYN is sent. 2991 * Otherwise, since we received a FIN then no 2992 * more input can be expected, send ACK now. 2993 */ 2994 if (tp->t_flags & TF_NEEDSYN) 2995 tp->t_flags |= TF_DELACK; 2996 else 2997 tp->t_flags |= TF_ACKNOW; 2998 tp->rcv_nxt++; 2999 } 3000 switch (tp->t_state) { 3001 3002 /* 3003 * In SYN_RECEIVED and ESTABLISHED STATES 3004 * enter the CLOSE_WAIT state. 
3005 */ 3006 case TCPS_SYN_RECEIVED: 3007 tp->t_starttime = ticks; 3008 /* FALLTHROUGH */ 3009 case TCPS_ESTABLISHED: 3010 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3011 break; 3012 3013 /* 3014 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3015 * enter the CLOSING state. 3016 */ 3017 case TCPS_FIN_WAIT_1: 3018 tcp_state_change(tp, TCPS_CLOSING); 3019 break; 3020 3021 /* 3022 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3023 * starting the time-wait timer, turning off the other 3024 * standard timers. 3025 */ 3026 case TCPS_FIN_WAIT_2: 3027 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 3028 KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata " 3029 "TCP_FIN_WAIT_2 ti_locked: %d", __func__, 3030 ti_locked)); 3031 3032 tcp_twstart(tp); 3033 INP_INFO_RUNLOCK(&V_tcbinfo); 3034 return; 3035 } 3036 } 3037 if (ti_locked == TI_RLOCKED) 3038 INP_INFO_RUNLOCK(&V_tcbinfo); 3039 ti_locked = TI_UNLOCKED; 3040 3041 #ifdef TCPDEBUG 3042 if (so->so_options & SO_DEBUG) 3043 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3044 &tcp_savetcp, 0); 3045 #endif 3046 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *)); 3047 3048 /* 3049 * Return any desired output. 3050 */ 3051 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3052 (void) tcp_output(tp); 3053 3054 check_delack: 3055 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d", 3056 __func__, ti_locked)); 3057 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3058 INP_WLOCK_ASSERT(tp->t_inpcb); 3059 3060 if (tp->t_flags & TF_DELACK) { 3061 tp->t_flags &= ~TF_DELACK; 3062 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3063 } 3064 INP_WUNLOCK(tp->t_inpcb); 3065 return; 3066 3067 dropafterack: 3068 /* 3069 * Generate an ACK dropping incoming segment if it occupies 3070 * sequence space, where the ACK reflects our state. 3071 * 3072 * We can now skip the test for the RST flag since all 3073 * paths to this code happen after packets containing 3074 * RST have been dropped. 3075 * 3076 * In the SYN-RECEIVED state, don't send an ACK unless the 3077 * segment we received passes the SYN-RECEIVED ACK test. 3078 * If it fails send a RST. This breaks the loop in the 3079 * "LAND" DoS attack, and also prevents an ACK storm 3080 * between two listening ports that have been sent forged 3081 * SYN segments, each with the source address of the other. 
3082 */ 3083 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3084 (SEQ_GT(tp->snd_una, th->th_ack) || 3085 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3086 rstreason = BANDLIM_RST_OPENPORT; 3087 goto dropwithreset; 3088 } 3089 #ifdef TCPDEBUG 3090 if (so->so_options & SO_DEBUG) 3091 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3092 &tcp_savetcp, 0); 3093 #endif 3094 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *)); 3095 if (ti_locked == TI_RLOCKED) 3096 INP_INFO_RUNLOCK(&V_tcbinfo); 3097 ti_locked = TI_UNLOCKED; 3098 3099 tp->t_flags |= TF_ACKNOW; 3100 (void) tcp_output(tp); 3101 INP_WUNLOCK(tp->t_inpcb); 3102 m_freem(m); 3103 return; 3104 3105 dropwithreset: 3106 if (ti_locked == TI_RLOCKED) 3107 INP_INFO_RUNLOCK(&V_tcbinfo); 3108 ti_locked = TI_UNLOCKED; 3109 3110 if (tp != NULL) { 3111 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3112 INP_WUNLOCK(tp->t_inpcb); 3113 } else 3114 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3115 return; 3116 3117 drop: 3118 if (ti_locked == TI_RLOCKED) { 3119 INP_INFO_RUNLOCK(&V_tcbinfo); 3120 ti_locked = TI_UNLOCKED; 3121 } 3122 #ifdef INVARIANTS 3123 else 3124 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3125 #endif 3126 3127 /* 3128 * Drop space held by incoming segment and return. 3129 */ 3130 #ifdef TCPDEBUG 3131 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3132 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3133 &tcp_savetcp, 0); 3134 #endif 3135 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *)); 3136 if (tp != NULL) 3137 INP_WUNLOCK(tp->t_inpcb); 3138 m_freem(m); 3139 } 3140 3141 /* 3142 * Issue RST and make ACK acceptable to originator of segment. 3143 * The mbuf must still include the original packet header. 3144 * tp may be NULL. 3145 */ 3146 static void 3147 tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3148 int tlen, int rstreason) 3149 { 3150 #ifdef INET 3151 struct ip *ip; 3152 #endif 3153 #ifdef INET6 3154 struct ip6_hdr *ip6; 3155 #endif 3156 3157 if (tp != NULL) { 3158 INP_WLOCK_ASSERT(tp->t_inpcb); 3159 } 3160 3161 /* Don't bother if destination was broadcast/multicast. */ 3162 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3163 goto drop; 3164 #ifdef INET6 3165 if (mtod(m, struct ip *)->ip_v == 6) { 3166 ip6 = mtod(m, struct ip6_hdr *); 3167 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3168 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3169 goto drop; 3170 /* IPv6 anycast check is done at tcp6_input() */ 3171 } 3172 #endif 3173 #if defined(INET) && defined(INET6) 3174 else 3175 #endif 3176 #ifdef INET 3177 { 3178 ip = mtod(m, struct ip *); 3179 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3180 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3181 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3182 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3183 goto drop; 3184 } 3185 #endif 3186 3187 /* Perform bandwidth limiting. */ 3188 if (badport_bandlim(rstreason) < 0) 3189 goto drop; 3190 3191 /* tcp_respond consumes the mbuf chain. */ 3192 if (th->th_flags & TH_ACK) { 3193 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3194 th->th_ack, TH_RST); 3195 } else { 3196 if (th->th_flags & TH_SYN) 3197 tlen++; 3198 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3199 (tcp_seq)0, TH_RST|TH_ACK); 3200 } 3201 return; 3202 drop: 3203 m_freem(m); 3204 } 3205 3206 /* 3207 * Parse TCP options and place in tcpopt. 
3208 */ 3209 static void 3210 tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3211 { 3212 int opt, optlen; 3213 3214 to->to_flags = 0; 3215 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3216 opt = cp[0]; 3217 if (opt == TCPOPT_EOL) 3218 break; 3219 if (opt == TCPOPT_NOP) 3220 optlen = 1; 3221 else { 3222 if (cnt < 2) 3223 break; 3224 optlen = cp[1]; 3225 if (optlen < 2 || optlen > cnt) 3226 break; 3227 } 3228 switch (opt) { 3229 case TCPOPT_MAXSEG: 3230 if (optlen != TCPOLEN_MAXSEG) 3231 continue; 3232 if (!(flags & TO_SYN)) 3233 continue; 3234 to->to_flags |= TOF_MSS; 3235 bcopy((char *)cp + 2, 3236 (char *)&to->to_mss, sizeof(to->to_mss)); 3237 to->to_mss = ntohs(to->to_mss); 3238 break; 3239 case TCPOPT_WINDOW: 3240 if (optlen != TCPOLEN_WINDOW) 3241 continue; 3242 if (!(flags & TO_SYN)) 3243 continue; 3244 to->to_flags |= TOF_SCALE; 3245 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3246 break; 3247 case TCPOPT_TIMESTAMP: 3248 if (optlen != TCPOLEN_TIMESTAMP) 3249 continue; 3250 to->to_flags |= TOF_TS; 3251 bcopy((char *)cp + 2, 3252 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3253 to->to_tsval = ntohl(to->to_tsval); 3254 bcopy((char *)cp + 6, 3255 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3256 to->to_tsecr = ntohl(to->to_tsecr); 3257 break; 3258 #ifdef TCP_SIGNATURE 3259 /* 3260 * XXX In order to reply to a host which has set the 3261 * TCP_SIGNATURE option in its initial SYN, we have to 3262 * record the fact that the option was observed here 3263 * for the syncache code to perform the correct response. 3264 */ 3265 case TCPOPT_SIGNATURE: 3266 if (optlen != TCPOLEN_SIGNATURE) 3267 continue; 3268 to->to_flags |= TOF_SIGNATURE; 3269 to->to_signature = cp + 2; 3270 break; 3271 #endif 3272 case TCPOPT_SACK_PERMITTED: 3273 if (optlen != TCPOLEN_SACK_PERMITTED) 3274 continue; 3275 if (!(flags & TO_SYN)) 3276 continue; 3277 if (!V_tcp_do_sack) 3278 continue; 3279 to->to_flags |= TOF_SACKPERM; 3280 break; 3281 case TCPOPT_SACK: 3282 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3283 continue; 3284 if (flags & TO_SYN) 3285 continue; 3286 to->to_flags |= TOF_SACK; 3287 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3288 to->to_sacks = cp + 2; 3289 TCPSTAT_INC(tcps_sack_rcv_blocks); 3290 break; 3291 default: 3292 continue; 3293 } 3294 } 3295 } 3296 3297 /* 3298 * Pull out of band byte out of a segment so 3299 * it doesn't appear in the user's data queue. 3300 * It is still reflected in the segment length for 3301 * sequencing purposes. 3302 */ 3303 static void 3304 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3305 int off) 3306 { 3307 int cnt = off + th->th_urp - 1; 3308 3309 while (cnt >= 0) { 3310 if (m->m_len > cnt) { 3311 char *cp = mtod(m, caddr_t) + cnt; 3312 struct tcpcb *tp = sototcpcb(so); 3313 3314 INP_WLOCK_ASSERT(tp->t_inpcb); 3315 3316 tp->t_iobc = *cp; 3317 tp->t_oobflags |= TCPOOB_HAVEDATA; 3318 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3319 m->m_len--; 3320 if (m->m_flags & M_PKTHDR) 3321 m->m_pkthdr.len--; 3322 return; 3323 } 3324 cnt -= m->m_len; 3325 m = m->m_next; 3326 if (m == NULL) 3327 break; 3328 } 3329 panic("tcp_pulloutofband"); 3330 } 3331 3332 /* 3333 * Collect new round-trip time estimate 3334 * and update averages and current timeout. 
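 * In real (unscaled) terms the code below applies the Jacobson/Karels
 * estimator: srtt += (rtt - srtt) / 8 and rttvar += (|rtt - srtt| -
 * rttvar) / 4, after which the retransmit timer is re-armed from
 * roughly srtt + 4 * rttvar, clamped between max(t_rttmin, rtt + 2)
 * and TCPTV_REXMTMAX.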
3335 */ 3336 static void 3337 tcp_xmit_timer(struct tcpcb *tp, int rtt) 3338 { 3339 int delta; 3340 3341 INP_WLOCK_ASSERT(tp->t_inpcb); 3342 3343 TCPSTAT_INC(tcps_rttupdated); 3344 tp->t_rttupdated++; 3345 if (tp->t_srtt != 0) { 3346 /* 3347 * srtt is stored as fixed point with 5 bits after the 3348 * binary point (i.e., scaled by 8). The following magic 3349 * is equivalent to the smoothing algorithm in rfc793 with 3350 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3351 * point). Adjust rtt to origin 0. 3352 */ 3353 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3354 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3355 3356 if ((tp->t_srtt += delta) <= 0) 3357 tp->t_srtt = 1; 3358 3359 /* 3360 * We accumulate a smoothed rtt variance (actually, a 3361 * smoothed mean difference), then set the retransmit 3362 * timer to smoothed rtt + 4 times the smoothed variance. 3363 * rttvar is stored as fixed point with 4 bits after the 3364 * binary point (scaled by 16). The following is 3365 * equivalent to rfc793 smoothing with an alpha of .75 3366 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3367 * rfc793's wired-in beta. 3368 */ 3369 if (delta < 0) 3370 delta = -delta; 3371 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3372 if ((tp->t_rttvar += delta) <= 0) 3373 tp->t_rttvar = 1; 3374 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3375 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3376 } else { 3377 /* 3378 * No rtt measurement yet - use the unsmoothed rtt. 3379 * Set the variance to half the rtt (so our first 3380 * retransmit happens at 3*rtt). 3381 */ 3382 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3383 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3384 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3385 } 3386 tp->t_rtttime = 0; 3387 tp->t_rxtshift = 0; 3388 3389 /* 3390 * the retransmit should happen at rtt + 4 * rttvar. 3391 * Because of the way we do the smoothing, srtt and rttvar 3392 * will each average +1/2 tick of bias. When we compute 3393 * the retransmit timer, we want 1/2 tick of rounding and 3394 * 1 extra tick because of +-1/2 tick uncertainty in the 3395 * firing of the timer. The bias will give us exactly the 3396 * 1.5 tick we need. But, because the bias is 3397 * statistical, we have to test that we don't drop below 3398 * the minimum feasible timer (which is 2 ticks). 3399 */ 3400 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3401 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3402 3403 /* 3404 * We received an ack for a packet that wasn't retransmitted; 3405 * it is probably safe to discard any error indications we've 3406 * received recently. This isn't quite right, but close enough 3407 * for now (a route might have failed after we sent a segment, 3408 * and the return path might not be symmetrical). 3409 */ 3410 tp->t_softerror = 0; 3411 } 3412 3413 /* 3414 * Determine a reasonable value for maxseg size. 3415 * If the route is known, check route for mtu. 3416 * If none, use an mss that can be handled on the outgoing interface 3417 * without forcing IP to fragment. If no route is found, route has no mtu, 3418 * or the destination isn't local, use a default, hopefully conservative 3419 * size (usually 512 or the default IP max size, but no more than the mtu 3420 * of the interface), as we can't discover anything about intervening 3421 * gateways or networks. We also initialize the congestion/slow start 3422 * window to be a single segment if the destination isn't local. 
3423 * While looking at the routing entry, we also initialize other path-dependent 3424 * parameters from pre-set or cached values in the routing entry. 3425 * 3426 * Also take into account the space needed for options that we 3427 * send regularly. Make maxseg shorter by that amount to assure 3428 * that we can send maxseg amount of data even when the options 3429 * are present. Store the upper limit of the length of options plus 3430 * data in maxopd. 3431 * 3432 * NOTE that this routine is only called when we process an incoming 3433 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3434 * settings are handled in tcp_mssopt(). 3435 */ 3436 void 3437 tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3438 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3439 { 3440 int mss = 0; 3441 u_long maxmtu = 0; 3442 struct inpcb *inp = tp->t_inpcb; 3443 struct hc_metrics_lite metrics; 3444 int origoffer; 3445 #ifdef INET6 3446 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3447 size_t min_protoh = isipv6 ? 3448 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3449 sizeof (struct tcpiphdr); 3450 #else 3451 const size_t min_protoh = sizeof(struct tcpiphdr); 3452 #endif 3453 3454 INP_WLOCK_ASSERT(tp->t_inpcb); 3455 3456 if (mtuoffer != -1) { 3457 KASSERT(offer == -1, ("%s: conflict", __func__)); 3458 offer = mtuoffer - min_protoh; 3459 } 3460 origoffer = offer; 3461 3462 /* Initialize. */ 3463 #ifdef INET6 3464 if (isipv6) { 3465 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3466 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 3467 } 3468 #endif 3469 #if defined(INET) && defined(INET6) 3470 else 3471 #endif 3472 #ifdef INET 3473 { 3474 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3475 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 3476 } 3477 #endif 3478 3479 /* 3480 * No route to sender, stay with default mss and return. 3481 */ 3482 if (maxmtu == 0) { 3483 /* 3484 * In case we return early we need to initialize metrics 3485 * to a defined state as tcp_hc_get() would do for us 3486 * if there was no cache hit. 3487 */ 3488 if (metricptr != NULL) 3489 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3490 return; 3491 } 3492 3493 /* What have we got? */ 3494 switch (offer) { 3495 case 0: 3496 /* 3497 * Offer == 0 means that there was no MSS on the SYN 3498 * segment, in this case we use tcp_mssdflt as 3499 * already assigned to t_maxopd above. 3500 */ 3501 offer = tp->t_maxopd; 3502 break; 3503 3504 case -1: 3505 /* 3506 * Offer == -1 means that we didn't receive SYN yet. 3507 */ 3508 /* FALLTHROUGH */ 3509 3510 default: 3511 /* 3512 * Prevent DoS attack with too small MSS. Round up 3513 * to at least minmss. 3514 */ 3515 offer = max(offer, V_tcp_minmss); 3516 } 3517 3518 /* 3519 * rmx information is now retrieved from tcp_hostcache. 3520 */ 3521 tcp_hc_get(&inp->inp_inc, &metrics); 3522 if (metricptr != NULL) 3523 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3524 3525 /* 3526 * If there's a discovered mtu in tcp hostcache, use it. 3527 * Else, use the link mtu. 
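 * (Example with assumed numbers: for an IPv4 connection across a
 * 1500-byte MTU path with no hostcache entry, mss becomes
 * 1500 - sizeof(struct tcpiphdr) = 1460; if both sides negotiated
 * timestamps, TCPOLEN_TSTAMP_APPA (12 bytes) is subtracted further
 * down, leaving a t_maxseg of 1448 while t_maxopd stays at 1460.)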
3528 */ 3529 if (metrics.rmx_mtu) 3530 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3531 else { 3532 #ifdef INET6 3533 if (isipv6) { 3534 mss = maxmtu - min_protoh; 3535 if (!V_path_mtu_discovery && 3536 !in6_localaddr(&inp->in6p_faddr)) 3537 mss = min(mss, V_tcp_v6mssdflt); 3538 } 3539 #endif 3540 #if defined(INET) && defined(INET6) 3541 else 3542 #endif 3543 #ifdef INET 3544 { 3545 mss = maxmtu - min_protoh; 3546 if (!V_path_mtu_discovery && 3547 !in_localaddr(inp->inp_faddr)) 3548 mss = min(mss, V_tcp_mssdflt); 3549 } 3550 #endif 3551 /* 3552 * XXX - The above conditional (mss = maxmtu - min_protoh) 3553 * probably violates the TCP spec. 3554 * The problem is that, since we don't know the 3555 * other end's MSS, we are supposed to use a conservative 3556 * default. But, if we do that, then MTU discovery will 3557 * never actually take place, because the conservative 3558 * default is much less than the MTUs typically seen 3559 * on the Internet today. For the moment, we'll sweep 3560 * this under the carpet. 3561 * 3562 * The conservative default might not actually be a problem 3563 * if the only case this occurs is when sending an initial 3564 * SYN with options and data to a host we've never talked 3565 * to before. Then, they will reply with an MSS value which 3566 * will get recorded and the new parameters should get 3567 * recomputed. For Further Study. 3568 */ 3569 } 3570 mss = min(mss, offer); 3571 3572 /* 3573 * Sanity check: make sure that maxopd will be large 3574 * enough to allow some data on segments even if the 3575 * all the option space is used (40bytes). Otherwise 3576 * funny things may happen in tcp_output. 3577 */ 3578 mss = max(mss, 64); 3579 3580 /* 3581 * maxopd stores the maximum length of data AND options 3582 * in a segment; maxseg is the amount of data in a normal 3583 * segment. We need to store this value (maxopd) apart 3584 * from maxseg, because now every segment carries options 3585 * and thus we normally have somewhat less data in segments. 3586 */ 3587 tp->t_maxopd = mss; 3588 3589 /* 3590 * origoffer==-1 indicates that no segments were received yet. 3591 * In this case we just guess. 3592 */ 3593 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 3594 (origoffer == -1 || 3595 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3596 mss -= TCPOLEN_TSTAMP_APPA; 3597 3598 tp->t_maxseg = mss; 3599 } 3600 3601 void 3602 tcp_mss(struct tcpcb *tp, int offer) 3603 { 3604 int mss; 3605 u_long bufsize; 3606 struct inpcb *inp; 3607 struct socket *so; 3608 struct hc_metrics_lite metrics; 3609 struct tcp_ifcap cap; 3610 3611 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3612 3613 bzero(&cap, sizeof(cap)); 3614 tcp_mss_update(tp, offer, -1, &metrics, &cap); 3615 3616 mss = tp->t_maxseg; 3617 inp = tp->t_inpcb; 3618 3619 /* 3620 * If there's a pipesize, change the socket buffer to that size, 3621 * don't change if sb_hiwat is different than default (then it 3622 * has been changed on purpose with setsockopt). 3623 * Make the socket buffers an integral number of mss units; 3624 * if the mss is larger than the socket buffer, decrease the mss. 
3625 */ 3626 so = inp->inp_socket; 3627 SOCKBUF_LOCK(&so->so_snd); 3628 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe) 3629 bufsize = metrics.rmx_sendpipe; 3630 else 3631 bufsize = so->so_snd.sb_hiwat; 3632 if (bufsize < mss) 3633 mss = bufsize; 3634 else { 3635 bufsize = roundup(bufsize, mss); 3636 if (bufsize > sb_max) 3637 bufsize = sb_max; 3638 if (bufsize > so->so_snd.sb_hiwat) 3639 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3640 } 3641 SOCKBUF_UNLOCK(&so->so_snd); 3642 tp->t_maxseg = mss; 3643 3644 SOCKBUF_LOCK(&so->so_rcv); 3645 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe) 3646 bufsize = metrics.rmx_recvpipe; 3647 else 3648 bufsize = so->so_rcv.sb_hiwat; 3649 if (bufsize > mss) { 3650 bufsize = roundup(bufsize, mss); 3651 if (bufsize > sb_max) 3652 bufsize = sb_max; 3653 if (bufsize > so->so_rcv.sb_hiwat) 3654 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3655 } 3656 SOCKBUF_UNLOCK(&so->so_rcv); 3657 3658 /* Check the interface for TSO capabilities. */ 3659 if (cap.ifcap & CSUM_TSO) { 3660 tp->t_flags |= TF_TSO; 3661 tp->t_tsomax = cap.tsomax; 3662 tp->t_tsomaxsegcount = cap.tsomaxsegcount; 3663 tp->t_tsomaxsegsize = cap.tsomaxsegsize; 3664 } 3665 } 3666 3667 /* 3668 * Determine the MSS option to send on an outgoing SYN. 3669 */ 3670 int 3671 tcp_mssopt(struct in_conninfo *inc) 3672 { 3673 int mss = 0; 3674 u_long maxmtu = 0; 3675 u_long thcmtu = 0; 3676 size_t min_protoh; 3677 3678 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3679 3680 #ifdef INET6 3681 if (inc->inc_flags & INC_ISIPV6) { 3682 mss = V_tcp_v6mssdflt; 3683 maxmtu = tcp_maxmtu6(inc, NULL); 3684 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3685 } 3686 #endif 3687 #if defined(INET) && defined(INET6) 3688 else 3689 #endif 3690 #ifdef INET 3691 { 3692 mss = V_tcp_mssdflt; 3693 maxmtu = tcp_maxmtu(inc, NULL); 3694 min_protoh = sizeof(struct tcpiphdr); 3695 } 3696 #endif 3697 #if defined(INET6) || defined(INET) 3698 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3699 #endif 3700 3701 if (maxmtu && thcmtu) 3702 mss = min(maxmtu, thcmtu) - min_protoh; 3703 else if (maxmtu || thcmtu) 3704 mss = max(maxmtu, thcmtu) - min_protoh; 3705 3706 return (mss); 3707 } 3708 3709 3710 /* 3711 * On a partial ack arrives, force the retransmission of the 3712 * next unacknowledged segment. Do not clear tp->t_dupacks. 3713 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3714 * be started again. 3715 */ 3716 static void 3717 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3718 { 3719 tcp_seq onxt = tp->snd_nxt; 3720 u_long ocwnd = tp->snd_cwnd; 3721 3722 INP_WLOCK_ASSERT(tp->t_inpcb); 3723 3724 tcp_timer_activate(tp, TT_REXMT, 0); 3725 tp->t_rtttime = 0; 3726 tp->snd_nxt = th->th_ack; 3727 /* 3728 * Set snd_cwnd to one segment beyond acknowledged offset. 3729 * (tp->snd_una has not yet been updated when this function is called.) 3730 */ 3731 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th); 3732 tp->t_flags |= TF_ACKNOW; 3733 (void) tcp_output(tp); 3734 tp->snd_cwnd = ocwnd; 3735 if (SEQ_GT(onxt, tp->snd_nxt)) 3736 tp->snd_nxt = onxt; 3737 /* 3738 * Partial window deflation. Relies on fact that tp->snd_una 3739 * not updated yet. 
3740 */ 3741 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th)) 3742 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th); 3743 else 3744 tp->snd_cwnd = 0; 3745 tp->snd_cwnd += tp->t_maxseg; 3746 } 3747 3748 int 3749 tcp_compute_pipe(struct tcpcb *tp) 3750 { 3751 return (tp->snd_max - tp->snd_una + 3752 tp->sackhint.sack_bytes_rexmit - 3753 tp->sackhint.sacked_bytes); 3754 } 3755