/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>

int	tcp_persmin;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_persmin, 0, sysctl_msec_to_ticks, "I",
    "minimum persistence interval");

int	tcp_persmax;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_persmax, 0, sysctl_msec_to_ticks, "I",
    "maximum persistence interval");

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I",
    "time to establish connection");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I",
    "time before keepalive probes begin");
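
/*
 * Note on units: every knob above and below that is wired to
 * sysctl_msec_to_ticks is set and read in milliseconds but stored in
 * kernel ticks.  As an illustrative example: with hz = 1000, setting
 * net.inet.tcp.keepidle=7200000 (two hours) stores 7200000 ticks, while
 * with hz = 100 the same setting would store 720000 ticks.
 */
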
int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW, &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I",
    "time between keepalive probes");

int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT | CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

VNET_DEFINE(int, tcp_msl);
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(tcp_msl), 0, sysctl_msec_to_ticks, "I",
    "Maximum segment lifetime");

VNET_DEFINE(int, tcp_msl_local);
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl_local,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(tcp_msl_local), 0, sysctl_msec_to_ticks, "I",
    "Maximum segment lifetime for local communication");

int	tcp_rexmit_initial;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_initial, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_rexmit_initial, 0, sysctl_msec_to_ticks, "I",
    "Initial Retransmission Timeout");

int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

int	tcp_rexmit_max;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_max, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_rexmit_max, 0, sysctl_msec_to_ticks, "I",
    "Maximum Retransmission Timeout");

int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

VNET_DEFINE(int, tcp_always_keepalive) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_VNET|CTLFLAG_RW,
    &VNET_NAME(tcp_always_keepalive), 0,
    "Assume SO_KEEPALIVE on all TCP connections");

int	tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

int	tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I",
    "FIN-WAIT2 timeout");

int	tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

/* max idle probes */
int	tcp_maxpersistidle;

int	tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
    &tcp_rexmit_drop_options, 0,
    "Drop TCP options from 3rd and later retransmitted SYN");

int	tcp_maxunacktime = TCPTV_MAXUNACKTIME;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxunacktime, CTLTYPE_INT | CTLFLAG_RW,
    &tcp_maxunacktime, 0, sysctl_msec_to_ticks, "I",
    "Maximum time (in ms) that a session can linger without making progress");

VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
    "Path MTU Discovery Black Hole Detection Enabled");

#ifdef INET
VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#endif
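
/*
 * A note on the blackhole MSS defaults above and below: the IPv6 value of
 * 1220 corresponds to the minimum IPv6 MTU of 1280 bytes minus 40 bytes of
 * IPv6 header and 20 bytes of TCP header, so a probe at that MSS should fit
 * through any conforming IPv6 path.  The IPv4 value of 1200 is a similarly
 * conservative choice, sized to pass through common tunnel encapsulations.
 */
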
#ifdef INET6
VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
    "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif

#ifdef RSS
static int	per_cpu_timers = 1;
#else
static int	per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

static int
sysctl_net_inet_tcp_retries(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_retries;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < 1) || (new > TCP_MAXRXTSHIFT))
			error = EINVAL;
		else
			V_tcp_retries = new;
	}
	return (error);
}

VNET_DEFINE(int, tcp_retries) = TCP_MAXRXTSHIFT;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, retries,
    CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_retries), 0, sysctl_net_inet_tcp_retries, "I",
    "maximum number of consecutive timer based retransmissions");

/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

	if (per_cpu_timers) {
#ifdef RSS
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
#endif
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	} else {
		return (0);
	}
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

int	tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 *
 * Each connection has 5 timers associated with it, which can be scheduled
 * simultaneously.  They are all serviced by one callout, tcp_timer_enter().
 * This function executes the next timer via the tcp_timersw[] vector.  Each
 * timer is supposed to return 'true' unless the connection was destroyed.
 * If a timer does return 'true', tcp_timer_enter() will schedule the callout
 * for the next timer.
 */

typedef bool tcp_timer_t(struct tcpcb *);
static tcp_timer_t tcp_timer_delack;
static tcp_timer_t tcp_timer_2msl;
static tcp_timer_t tcp_timer_keep;
static tcp_timer_t tcp_timer_persist;
static tcp_timer_t tcp_timer_rexmt;

static tcp_timer_t * const tcp_timersw[TT_N] = {
	[TT_DELACK] = tcp_timer_delack,
	[TT_REXMT] = tcp_timer_rexmt,
	[TT_PERSIST] = tcp_timer_persist,
	[TT_KEEP] = tcp_timer_keep,
	[TT_2MSL] = tcp_timer_2msl,
};

/*
 * tcp_output_locked() is a timer specific variation of a call to
 * tcp_output(), see tcp_var.h for the rest.  It handles a drop request
 * from advanced stacks, but keeps the tcpcb locked unless tcp_drop()
 * destroyed it.
 * Returns true if the tcpcb is valid and locked.
 */
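/*
 * The convention, per the tfb_tcp_output() declaration in tcp_var.h, is
 * that a stack returns zero or a positive errno on an ordinary failure,
 * and a negative errno when it wants the connection torn down.  For
 * example, a stack returning -ETIMEDOUT from its output method results in
 * tcp_drop(tp, ETIMEDOUT) below, which may free the tcpcb.
 */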
static inline bool
tcp_output_locked(struct tcpcb *tp)
{
	int rv;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if ((rv = tp->t_fb->tfb_tcp_output(tp)) < 0) {
		KASSERT(tp->t_fb->tfb_flags & TCP_FUNC_OUTPUT_CANDROP,
		    ("TCP stack %s requested tcp_drop(%p)",
		    tp->t_fb->tfb_tcp_block_name, tp));
		tp = tcp_drop(tp, -rv);
	}

	return (tp != NULL);
}

static bool
tcp_timer_delack(struct tcpcb *tp)
{
	struct epoch_tracker et;
#if defined(INVARIANTS) || defined(VIMAGE)
	struct inpcb *inp = tptoinpcb(tp);
#endif
	bool rv;

	INP_WLOCK_ASSERT(inp);

	CURVNET_SET(inp->inp_vnet);
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	NET_EPOCH_ENTER(et);
	rv = tcp_output_locked(tp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);
}

static bool
tcp_timer_2msl(struct tcpcb *tp)
{
	struct inpcb *inp = tptoinpcb(tp);
	bool close = false;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	tcp_log_end_status(tp, TCP_EI_STATUS_2MSL);
	tcp_free_sackholes(tp);
	/*
	 * The 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for the peer to close and the connection has been
	 * idle too long, delete the connection control block.  Otherwise,
	 * check again in a bit.
	 *
	 * If fast recycling of FIN_WAIT_2 connections is enabled, we are
	 * in FIN_WAIT_2, and the receiver has closed, there's no point in
	 * hanging onto a FIN_WAIT_2 socket.  Just close it, and ignore the
	 * fact that there were recent incoming segments.
	 *
	 * XXXGL: check if inp_socket shall always be !NULL here?
	 */
	if (tp->t_state == TCPS_TIME_WAIT) {
		close = true;
	} else if (tp->t_state == TCPS_FIN_WAIT_2 &&
	    tcp_fast_finwait2_recycle && inp->inp_socket &&
	    (inp->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		close = true;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp))
			tcp_timer_activate(tp, TT_2MSL, TP_KEEPINTVL(tp));
		else
			close = true;
	}
	if (close) {
		struct epoch_tracker et;

		NET_EPOCH_ENTER(et);
		tp = tcp_close(tp);
		NET_EPOCH_EXIT(et);
	}
	CURVNET_RESTORE();

	return (tp != NULL);
}
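
/*
 * A worked example of the keepalive schedule implemented below, assuming
 * the stock defaults (keepidle 2 hours, keepintvl 75 seconds, keepcnt 8)
 * and an established connection with keepalives in effect: after two hours
 * of silence the first probe is sent, further probes follow every 75
 * seconds, and once the idle time exceeds TP_KEEPIDLE() + TP_MAXIDLE()
 * (here 2 hours + 8 * 75 seconds) the connection is dropped with
 * ETIMEDOUT.
 */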
static bool
tcp_timer_keep(struct tcpcb *tp)
{
	struct epoch_tracker et;
	struct inpcb *inp = tptoinpcb(tp);
	struct tcptemp *t_template;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet.  If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			tcp_timer_activate(tp, TT_KEEP,
			    TP_KEEPIDLE(tp) - idletime);
			CURVNET_RESTORE();
			return (true);
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((V_tcp_always_keepalive ||
	    inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			NET_EPOCH_ENTER(et);
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			NET_EPOCH_EXIT(et);
			free(t_template, M_TEMP);
		}
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPINTVL(tp));
	} else
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	CURVNET_RESTORE();
	return (true);

dropit:
	TCPSTAT_INC(tcps_keepdrops);
	NET_EPOCH_ENTER(et);
	tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
	tp = tcp_drop(tp, ETIMEDOUT);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (tp != NULL);
}

/*
 * Has this session exceeded the maximum time without seeing a substantive
 * acknowledgement?  If so, return true; otherwise false.
 */
static bool
tcp_maxunacktime_check(struct tcpcb *tp)
{

	/* Are we tracking this timer for this session? */
	if (TP_MAXUNACKTIME(tp) == 0)
		return false;

	/* Do we have a current measurement? */
	if (tp->t_acktime == 0)
		return false;

	/* Are we within the acceptable range? */
	if (TSTMP_GT(TP_MAXUNACKTIME(tp) + tp->t_acktime, (u_int)ticks))
		return false;

	/* We exceeded the timer. */
	TCPSTAT_INC(tcps_progdrops);
	return true;
}
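
/*
 * An illustration of what the persist timer below is for, assuming a
 * sender blocked by a zero receive window: the peer shrank the window to
 * zero and its subsequent window update was lost.  Without the forced
 * probe below, both sides would wait on each other indefinitely.  The
 * probe either elicits an ACK carrying the reopened window, or keeps
 * failing until the backoff and idle-time checks drop the connection.
 */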
static bool
tcp_timer_persist(struct tcpcb *tp)
{
	struct epoch_tracker et;
#if defined(INVARIANTS) || defined(VIMAGE)
	struct inpcb *inp = tptoinpcb(tp);
#endif
	bool progdrop, rv;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 * Also, drop the connection if we haven't been making
	 * progress.
	 */
	progdrop = tcp_maxunacktime_check(tp);
	if (progdrop || (tp->t_rxtshift >= V_tcp_retries &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff))) {
		if (progdrop) {
			tcp_log_end_status(tp, TCP_EI_STATUS_PROGRESS);
		} else {
			TCPSTAT_INC(tcps_persistdrop);
			tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
		}
		goto dropit;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
		goto dropit;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	NET_EPOCH_ENTER(et);
	if ((rv = tcp_output_locked(tp)))
		tp->t_flags &= ~TF_FORCEDATA;
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);

dropit:
	NET_EPOCH_ENTER(et);
	tp = tcp_drop(tp, ETIMEDOUT);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (tp != NULL);
}
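
/*
 * A worked example of the exponential backoff applied below, assuming a
 * current retransmit value (TCP_REXMTVAL()) of 200 ms: successive timer
 * based retransmissions fire roughly 200 ms, 400 ms, 800 ms, ... apart,
 * following tcp_backoff[] ({1, 2, 4, ...}, capped at 512), with each
 * result clamped into [t_rttmin, tcp_rexmit_max] by TCPT_RANGESET().
 * With the default of 12 retries this adds up to several minutes before
 * the connection is dropped with ETIMEDOUT.
 */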
static bool
tcp_timer_rexmt(struct tcpcb *tp)
{
	struct epoch_tracker et;
	struct inpcb *inp = tptoinpcb(tp);
	int rexmt;
	bool isipv6, rv;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 *
	 * If we've either exceeded the maximum number of retransmissions,
	 * or we've gone long enough without making progress, then drop
	 * the session.
	 */
	if (++tp->t_rxtshift > V_tcp_retries || tcp_maxunacktime_check(tp)) {
		if (tp->t_rxtshift > V_tcp_retries)
			TCPSTAT_INC(tcps_timeoutdrop);
		tp->t_rxtshift = V_tcp_retries;
		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		CURVNET_RESTORE();

		return (tp != NULL);
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * First retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
			tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		/*
		 * If we have negotiated timestamps, badrxtwin will be set
		 * to the value that we set the retransmitted packet's
		 * to_tsval to in tcp_output().
		 */
		tp->t_flags |= TF_PREVVALID;
		tcp_resend_sackholes(tp);
	} else {
		tp->t_flags &= ~TF_PREVVALID;
		tcp_free_sackholes(tp);
	}
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, tcp_rexmit_max);

	/*
	 * We enter the path for PLMTUD if the connection is established
	 * or in FIN_WAIT_1 status.  The reason for the latter is that if
	 * the amount of data we send is very small, we could send it in a
	 * couple of packets and proceed straight to FIN; in that case we
	 * won't catch the ESTABLISHED state.
	 */
#ifdef INET6
	isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
#else
	isipv6 = false;
#endif
	if (((V_tcp_pmtud_blackhole_detect == 1) ||
	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
	    ((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_FIN_WAIT_1))) {
		if (tp->t_rxtshift == 1) {
			/*
			 * We enter blackhole detection after the first
			 * unsuccessful timer based retransmission.
			 * Then we reduce up to two times the MSS, each
			 * candidate giving two tries of retransmissions.
			 * But we give a candidate only two tries, if it
			 * actually reduces the MSS.
			 */
			tp->t_blackhole_enter = 2;
			tp->t_blackhole_exit = tp->t_blackhole_enter;
			if (isipv6) {
#ifdef INET6
				if (tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_v6mssdflt &&
				    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			} else {
#ifdef INET
				if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_mssdflt &&
				    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			}
		}
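
		/*
		 * A worked example of the counters set up above, assuming
		 * IPv4 with t_maxseg 1460, a blackhole MSS of 1200, and a
		 * default MSS of 536: t_blackhole_exit becomes 2 + 2 + 2 = 6,
		 * so the MSS is lowered at t_rxtshift 2 (probed at shifts 2
		 * and 3) and again at shift 4 (probed at 4 and 5); once the
		 * shift reaches the exit value, the original MSS is restored
		 * in the else branch below.
		 */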
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= tp->t_blackhole_enter &&
		    tp->t_rxtshift < tp->t_blackhole_exit &&
		    (tp->t_rxtshift - tp->t_blackhole_enter) % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to lower value than what we
			 *   negotiated with peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to the blackhole value or to the
			 * default in an attempt to retransmit.
			 */
#ifdef INET6
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss &&
			    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss &&
			    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(&tp->t_ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, maybe this isn't a blackhole,
			 * so restore the previous MSS and blackhole detection
			 * flags.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= tp->t_blackhole_exit)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				if (tp->t_maxseg < V_tcp_mssdflt) {
					/*
					 * The MSS is so small we should not
					 * process incoming SACK's since we are
					 * subject to attack in such a case.
					 */
					tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT;
				} else {
					tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT;
				}
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(&tp->t_ccv);
			}
		}
	}

	/*
	 * Disable RFC1323 and SACK if we haven't got any response to
	 * our third SYN to work-around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're having
	 * connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((inp->inp_vflag & INP_IPV6) != 0)
			in6_losing(inp);
		else
#endif
			in_losing(inp);
	}
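
	/*
	 * Note: in_losing()/in6_losing() tell the lower layers to discard
	 * cached routing state for the peer, so the retransmission set up
	 * below may be sent over a freshly selected path.
	 */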
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	/* Do not overwrite the snd_cwnd on SYN retransmissions. */
	if (tp->t_state != TCPS_SYN_SENT)
		cc_cong_signal(tp, NULL, CC_RTO);
	NET_EPOCH_ENTER(et);
	rv = tcp_output_locked(tp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);
}

static void
tcp_bblog_timer(struct tcpcb *tp, tt_which which, tt_what what, uint32_t ticks)
{
	struct tcp_log_buffer *lgb;
	uint64_t ms;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tcp_bblogging_on(tp))
		lgb = tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0,
		    NULL, false, NULL, NULL, 0, NULL);
	else
		lgb = NULL;
	if (lgb != NULL) {
		lgb->tlb_flex1 = (what << 8) | which;
		if (what == TT_STARTING) {
			/* Convert ticks to ms and store it in tlb_flex2. */
			if (hz == 1000)
				lgb->tlb_flex2 = ticks;
			else {
				ms = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
				if (ms > UINT32_MAX)
					lgb->tlb_flex2 = UINT32_MAX;
				else
					lgb->tlb_flex2 = (uint32_t)ms;
			}
		}
	}
}

static inline tt_which
tcp_timer_next(struct tcpcb *tp, sbintime_t *precision)
{
	tt_which i, rv;
	sbintime_t after, before;

	for (i = 0, rv = TT_N, after = before = SBT_MAX; i < TT_N; i++) {
		if (tp->t_timers[i] < after) {
			after = tp->t_timers[i];
			rv = i;
		}
		before = MIN(before, tp->t_timers[i] + tp->t_precisions[i]);
	}
	if (precision != NULL)
		*precision = before - after;

	return (rv);
}

static void
tcp_timer_enter(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp = tptoinpcb(tp);
	sbintime_t precision;
	tt_which which;

	INP_WLOCK_ASSERT(inp);

	which = tcp_timer_next(tp, NULL);
	MPASS(which < TT_N);
	tp->t_timers[which] = SBT_MAX;
	tp->t_precisions[which] = 0;

	tcp_bblog_timer(tp, which, TT_PROCESSING, 0);
	if (tcp_timersw[which](tp)) {
		tcp_bblog_timer(tp, which, TT_PROCESSED, 0);
		if ((which = tcp_timer_next(tp, &precision)) != TT_N) {
			MPASS(tp->t_state > TCPS_CLOSED);
			callout_reset_sbt_on(&tp->t_callout,
			    tp->t_timers[which], precision, tcp_timer_enter,
			    tp, inp_to_cpuid(inp), C_ABSOLUTE);
		}
		INP_WUNLOCK(inp);
	}
}

/*
 * Activate or stop (delta == 0) a TCP timer.
 */
void
tcp_timer_activate(struct tcpcb *tp, tt_which which, u_int delta)
{
	struct inpcb *inp = tptoinpcb(tp);
	sbintime_t precision;
	tt_what what;

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	INP_WLOCK_ASSERT(inp);
	MPASS(tp->t_state > TCPS_CLOSED);

	if (delta > 0) {
		what = TT_STARTING;
		callout_when(tick_sbt * delta, 0, C_HARDCLOCK,
		    &tp->t_timers[which], &tp->t_precisions[which]);
	} else {
		what = TT_STOPPING;
		tp->t_timers[which] = SBT_MAX;
	}
	tcp_bblog_timer(tp, which, what, delta);

	if ((which = tcp_timer_next(tp, &precision)) != TT_N)
		callout_reset_sbt_on(&tp->t_callout, tp->t_timers[which],
		    precision, tcp_timer_enter, tp, inp_to_cpuid(inp),
		    C_ABSOLUTE);
	else
		callout_stop(&tp->t_callout);
}
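
/*
 * Typical usage of tcp_timer_activate() and tcp_timer_active(), e.g. when
 * (re)arming the retransmit timer from an output path (a sketch, not a
 * call site in this file):
 *
 *	if (!tcp_timer_active(tp, TT_REXMT))
 *		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
 *
 * and stopping the same timer once all outstanding data is acknowledged:
 *
 *	tcp_timer_activate(tp, TT_REXMT, 0);
 */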
bool
tcp_timer_active(struct tcpcb *tp, tt_which which)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	return (tp->t_timers[which] != SBT_MAX);
}

/*
 * Stop all timers associated with tcpcb.
 * Called when tcpcb moves to TCPS_CLOSED.
 */
void
tcp_timer_stop(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	/*
	 * We don't check the return value from callout_stop().  There are
	 * two reasons why it can return 0.  First, a legitimate one: we
	 * could have been called from the callout itself.  Second,
	 * callout(9) has a bug.  It can race internally in
	 * softclock_call_cc(), when the callout has already completed, but
	 * cc_exec_curr still points at the callout.
	 */
	(void)callout_stop(&tp->t_callout);
	/*
	 * In case of being called from the callout itself, we must make
	 * sure that we don't reschedule.
	 */
	for (tt_which i = 0; i < TT_N; i++)
		tp->t_timers[i] = SBT_MAX;
}