/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>

int	tcp_persmin;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_persmin, 0, sysctl_msec_to_ticks, "I",
    "minimum persistence interval");

int	tcp_persmax;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_persmax, 0, sysctl_msec_to_ticks, "I",
    "maximum persistence interval");

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I",
    "time to establish connection");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I",
    "time before keepalive probes begin");
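/*
 * Note: the interval sysctls in this file are exported in milliseconds
 * but stored in ticks, with sysctl_msec_to_ticks() converting on the
 * way in and out.  For example, the following would start keepalive
 * probing after one hour of idle time (an illustrative value, not the
 * default):
 *
 *	# sysctl net.inet.tcp.keepidle=3600000
 */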
int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I",
    "time between keepalive probes");

int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

VNET_DEFINE(int, tcp_msl);
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(tcp_msl), 0, sysctl_msec_to_ticks, "I",
    "Maximum segment lifetime");

int	tcp_rexmit_initial;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_initial,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_initial, 0, sysctl_msec_to_ticks, "I",
    "Initial Retransmission Timeout");

int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

VNET_DEFINE(int, tcp_always_keepalive) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_VNET|CTLFLAG_RW,
    &VNET_NAME(tcp_always_keepalive), 0,
    "Assume SO_KEEPALIVE on all TCP connections");

int	tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

int	tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I",
    "FIN-WAIT2 timeout");

int	tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

/* max idle probes */
int	tcp_maxpersistidle;

int	tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
    &tcp_rexmit_drop_options, 0,
    "Drop TCP options from 3rd and later retransmitted SYN");

int	tcp_maxunacktime = TCPTV_MAXUNACKTIME;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxunacktime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_maxunacktime, 0, sysctl_msec_to_ticks, "I",
    "Maximum time (in ms) that a session can linger without making progress");

VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
    "Path MTU Discovery Black Hole Detection Enabled");

#ifdef INET
VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#endif

#ifdef INET6
VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
    "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif
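/*
 * A note on the defaults above: 1220 is the IPv6 minimum link MTU
 * (1280) less 40 bytes of IPv6 header and 20 bytes of TCP header;
 * 1200 is a similarly conservative choice for IPv4, small enough to
 * pass most tunneled paths.
 */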
#ifdef RSS
static int	per_cpu_timers = 1;
#else
static int	per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

static int
sysctl_net_inet_tcp_retries(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_retries;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < 1) || (new > TCP_MAXRXTSHIFT))
			error = EINVAL;
		else
			V_tcp_retries = new;
	}
	return (error);
}

VNET_DEFINE(int, tcp_retries) = TCP_MAXRXTSHIFT;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, retries,
    CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_retries), 0, sysctl_net_inet_tcp_retries, "I",
    "maximum number of consecutive timer based retransmissions");

/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

	if (per_cpu_timers) {
#ifdef RSS
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
#endif
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	} else {
		return (0);
	}
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

int	tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */
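/*
 * Sanity check on the table above: 1+2+4+8+16+32+64+128+256 + 4*512
 * == 2559, matching tcp_totbackoff.  tcp_timer_persist() below uses
 * the sum to bound the total time spent probing a zero window before
 * the connection is dropped.
 */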
/*
 * TCP timer processing.
 *
 * Each connection has 5 timers associated with it, which can be scheduled
 * simultaneously.  They all are serviced by one callout, tcp_timer_enter().
 * This function executes the next timer via the tcp_timersw[] vector.  Each
 * timer is supposed to return 'true' unless the connection was destroyed.
 * In the former case tcp_timer_enter() will schedule the callout for the
 * next timer.
 */

typedef bool tcp_timer_t(struct tcpcb *);
static tcp_timer_t tcp_timer_delack;
static tcp_timer_t tcp_timer_2msl;
static tcp_timer_t tcp_timer_keep;
static tcp_timer_t tcp_timer_persist;
static tcp_timer_t tcp_timer_rexmt;

static tcp_timer_t * const tcp_timersw[TT_N] = {
	[TT_DELACK] = tcp_timer_delack,
	[TT_REXMT] = tcp_timer_rexmt,
	[TT_PERSIST] = tcp_timer_persist,
	[TT_KEEP] = tcp_timer_keep,
	[TT_2MSL] = tcp_timer_2msl,
};

/*
 * tcp_output_locked() is a timer specific variation of a call to
 * tcp_output(), see tcp_var.h for the rest.  It handles drop requests
 * from advanced stacks, but keeps the tcpcb locked unless tcp_drop()
 * destroyed it.  Returns true if the tcpcb is valid and locked.
 */
static inline bool
tcp_output_locked(struct tcpcb *tp)
{
	int rv;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if ((rv = tp->t_fb->tfb_tcp_output(tp)) < 0) {
		KASSERT(tp->t_fb->tfb_flags & TCP_FUNC_OUTPUT_CANDROP,
		    ("TCP stack %s requested tcp_drop(%p)",
		    tp->t_fb->tfb_tcp_block_name, tp));
		tp = tcp_drop(tp, rv);
	}

	return (tp != NULL);
}

static bool
tcp_timer_delack(struct tcpcb *tp)
{
	struct epoch_tracker et;
#if defined(INVARIANTS) || defined(VIMAGE)
	struct inpcb *inp = tptoinpcb(tp);
#endif
	bool rv;

	INP_WLOCK_ASSERT(inp);

	CURVNET_SET(inp->inp_vnet);
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	NET_EPOCH_ENTER(et);
	rv = tcp_output_locked(tp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);
}

static bool
tcp_timer_2msl(struct tcpcb *tp)
{
	struct inpcb *inp = tptoinpcb(tp);
	bool close = false;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	tcp_log_end_status(tp, TCP_EI_STATUS_2MSL);
	tcp_free_sackholes(tp);
	/*
	 * The 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for the peer to close and the connection has been
	 * idle too long, delete the connection control block.  Otherwise,
	 * check again in a bit.
	 *
	 * If fastrecycle of FIN_WAIT_2 is enabled, we are in FIN_WAIT_2
	 * and the receiver has closed, there's no point in hanging onto
	 * a FIN_WAIT_2 socket.  Just close it.  Ignore the fact that there
	 * were recent incoming segments.
	 *
	 * XXXGL: check if inp_socket shall always be !NULL here?
	 */
	if (tp->t_state == TCPS_TIME_WAIT) {
		close = true;
	} else if (tp->t_state == TCPS_FIN_WAIT_2 &&
	    tcp_fast_finwait2_recycle && inp->inp_socket &&
	    (inp->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		close = true;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp))
			tcp_timer_activate(tp, TT_2MSL, TP_KEEPINTVL(tp));
		else
			close = true;
	}
	if (close) {
		struct epoch_tracker et;

		NET_EPOCH_ENTER(et);
		tp = tcp_close(tp);
		NET_EPOCH_EXIT(et);
	}
	CURVNET_RESTORE();

	return (tp != NULL);
}
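/*
 * A note on TP_MAXIDLE() used above and below: it expands to
 * TP_KEEPCNT(tp) * TP_KEEPINTVL(tp), i.e. the total time covered by
 * the configured number of keepalive probes.
 */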
static bool
tcp_timer_keep(struct tcpcb *tp)
{
	struct epoch_tracker et;
	struct inpcb *inp = tptoinpcb(tp);
	struct tcptemp *t_template;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet.  If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			tcp_timer_activate(tp, TT_KEEP,
			    TP_KEEPIDLE(tp) - idletime);
			CURVNET_RESTORE();
			return (true);
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((V_tcp_always_keepalive ||
	    inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			NET_EPOCH_ENTER(et);
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			NET_EPOCH_EXIT(et);
			free(t_template, M_TEMP);
		}
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPINTVL(tp));
	} else
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	CURVNET_RESTORE();
	return (true);

dropit:
	TCPSTAT_INC(tcps_keepdrops);
	NET_EPOCH_ENTER(et);
	tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
	tp = tcp_drop(tp, ETIMEDOUT);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (tp != NULL);
}

/*
 * Has this session exceeded the maximum time without seeing a substantive
 * acknowledgement?  If so, return true; otherwise false.
 */
static bool
tcp_maxunacktime_check(struct tcpcb *tp)
{

	/* Are we tracking this timer for this session? */
	if (TP_MAXUNACKTIME(tp) == 0)
		return (false);

	/* Do we have a current measurement? */
	if (tp->t_acktime == 0)
		return (false);

	/* Are we within the acceptable range? */
	if (TSTMP_GT(TP_MAXUNACKTIME(tp) + tp->t_acktime, (u_int)ticks))
		return (false);

	/* We exceeded the timer. */
	TCPSTAT_INC(tcps_progdrops);
	return (true);
}
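/*
 * A note on the deadline check above: TSTMP_GT() compares via the sign
 * of the 32-bit difference of its arguments, so the result stays
 * correct even when the ticks counter wraps around.
 */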
static bool
tcp_timer_persist(struct tcpcb *tp)
{
	struct epoch_tracker et;
#if defined(INVARIANTS) || defined(VIMAGE)
	struct inpcb *inp = tptoinpcb(tp);
#endif
	bool progdrop, rv;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 * Also, drop the connection if we haven't been making
	 * progress.
	 */
	progdrop = tcp_maxunacktime_check(tp);
	if (progdrop || (tp->t_rxtshift >= V_tcp_retries &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff))) {
		if (!progdrop)
			TCPSTAT_INC(tcps_persistdrop);
		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
		goto dropit;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
		goto dropit;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	NET_EPOCH_ENTER(et);
	if ((rv = tcp_output_locked(tp)))
		tp->t_flags &= ~TF_FORCEDATA;
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);

dropit:
	NET_EPOCH_ENTER(et);
	tp = tcp_drop(tp, ETIMEDOUT);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (tp != NULL);
}
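/*
 * A note on the persist probe above: TF_FORCEDATA makes tcp_output()
 * transmit one byte beyond the advertised window even though the send
 * window is zero (a window probe); the flag is cleared again once the
 * probe has been sent.
 */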
static bool
tcp_timer_rexmt(struct tcpcb *tp)
{
	struct epoch_tracker et;
	struct inpcb *inp = tptoinpcb(tp);
	int rexmt;
	bool isipv6, rv;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	tcp_free_sackholes(tp);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 *
	 * If we've either exceeded the maximum number of retransmissions,
	 * or we've gone long enough without making progress, then drop
	 * the session.
	 */
	if (++tp->t_rxtshift > V_tcp_retries || tcp_maxunacktime_check(tp)) {
		if (tp->t_rxtshift > V_tcp_retries)
			TCPSTAT_INC(tcps_timeoutdrop);
		tp->t_rxtshift = V_tcp_retries;
		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		CURVNET_RESTORE();

		return (tp != NULL);
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * First retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
			tp->t_badrxtwin = ticks +
			    (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		/*
		 * In the event that we've negotiated timestamps,
		 * t_badrxtwin will be set to the value that we set
		 * the retransmitted packet's to_tsval to in tcp_output().
		 */
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    tp->t_rttmin, TCPTV_REXMTMAX);
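	/*
	 * Example: with a smoothed RTO of 500 ms, the fourth timeout
	 * (t_rxtshift == 4) yields rexmt = 500 ms * 16 = 8 s, which
	 * TCPT_RANGESET() above then clamps into the range
	 * [t_rttmin, TCPTV_REXMTMAX] (traditionally 64 s at the top end).
	 */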
	/*
	 * We enter the path for PLMTUD if the connection is established
	 * or in the FIN_WAIT_1 state.  The reason for the latter is that
	 * if the amount of data we send is very small, we could send it
	 * in a couple of packets and proceed straight to FIN.  In that
	 * case we won't catch the ESTABLISHED state.
	 */
#ifdef INET6
	isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
#else
	isipv6 = false;
#endif
	if (((V_tcp_pmtud_blackhole_detect == 1) ||
	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
	    ((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_FIN_WAIT_1))) {
		if (tp->t_rxtshift == 1) {
			/*
			 * We enter blackhole detection after the first
			 * unsuccessful timer based retransmission.
			 * Then we reduce up to two times the MSS, each
			 * candidate giving two tries of retransmissions.
			 * But we give a candidate only two tries, if it
			 * actually reduces the MSS.
			 */
			tp->t_blackhole_enter = 2;
			tp->t_blackhole_exit = tp->t_blackhole_enter;
			if (isipv6) {
#ifdef INET6
				if (tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_v6mssdflt &&
				    V_tcp_v6pmtud_blackhole_mss >
				    V_tcp_v6mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			} else {
#ifdef INET
				if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_mssdflt &&
				    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			}
		}
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= tp->t_blackhole_enter &&
		    tp->t_rxtshift < tp->t_blackhole_exit &&
		    (tp->t_rxtshift - tp->t_blackhole_enter) % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to lower value than what we
			 *   negotiated with the peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to the blackhole value or to the
			 * default in an attempt to retransmit.
			 */
#ifdef INET6
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss &&
			    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss &&
			    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(&tp->t_ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, maybe this isn't a blackhole,
			 * so we restore the previous MSS and blackhole
			 * detection flags.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= tp->t_blackhole_exit)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(&tp->t_ccv);
			}
		}
	}

	/*
	 * Disable RFC1323 and SACK if we haven't got any response to
	 * our third SYN to work around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're having
	 * connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((inp->inp_vflag & INP_IPV6) != 0)
			in6_losing(inp);
		else
#endif
			in_losing(inp);
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	cc_cong_signal(tp, NULL, CC_RTO);
	NET_EPOCH_ENTER(et);
	rv = tcp_output_locked(tp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);
}
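/*
 * Worked example of the black-hole state machine above, for IPv4 with
 * t_maxseg = 1460 and the default tunables: t_blackhole_enter = 2 and
 * t_blackhole_exit = 6, so retransmits 2-3 are tried with an MSS of
 * 1200 bytes, retransmits 4-5 with V_tcp_mssdflt, and from the 6th
 * unsuccessful retransmit on the original MSS is restored.
 */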
static void
tcp_bblog_timer(struct tcpcb *tp, tt_which which, tt_what what, uint32_t ticks)
{
	struct tcp_log_buffer *lgb;
	uint64_t ms;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tcp_bblogging_on(tp))
		lgb = tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0,
		    NULL, false, NULL, NULL, 0, NULL);
	else
		lgb = NULL;
	if (lgb != NULL) {
		lgb->tlb_flex1 = (what << 8) | which;
		if (what == TT_STARTING) {
			/* Convert ticks to ms and store it in tlb_flex2. */
			if (hz == 1000)
				lgb->tlb_flex2 = ticks;
			else {
				ms = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
				if (ms > UINT32_MAX)
					lgb->tlb_flex2 = UINT32_MAX;
				else
					lgb->tlb_flex2 = (uint32_t)ms;
			}
		}
	}
}

static inline tt_which
tcp_timer_next(struct tcpcb *tp, sbintime_t *precision)
{
	tt_which i, rv;
	sbintime_t after, before;

	for (i = 0, rv = TT_N, after = before = SBT_MAX; i < TT_N; i++) {
		if (tp->t_timers[i] < after) {
			after = tp->t_timers[i];
			rv = i;
		}
		before = MIN(before, tp->t_timers[i] + tp->t_precisions[i]);
	}
	if (precision != NULL)
		*precision = before - after;

	return (rv);
}

static void
tcp_timer_enter(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp = tptoinpcb(tp);
	sbintime_t precision;
	tt_which which;
	bool tp_valid;

	INP_WLOCK_ASSERT(inp);
	MPASS((curthread->td_pflags & TDP_INTCPCALLOUT) == 0);

	curthread->td_pflags |= TDP_INTCPCALLOUT;

	which = tcp_timer_next(tp, NULL);
	MPASS(which < TT_N);
	tp->t_timers[which] = SBT_MAX;
	tp->t_precisions[which] = 0;

	tcp_bblog_timer(tp, which, TT_PROCESSING, 0);
	tp_valid = tcp_timersw[which](tp);
	if (tp_valid) {
		tcp_bblog_timer(tp, which, TT_PROCESSED, 0);
		if ((which = tcp_timer_next(tp, &precision)) != TT_N) {
			callout_reset_sbt_on(&tp->t_callout,
			    tp->t_timers[which], precision, tcp_timer_enter,
			    tp, inp_to_cpuid(inp), C_ABSOLUTE);
		}
		INP_WUNLOCK(inp);
	}

	curthread->td_pflags &= ~TDP_INTCPCALLOUT;
}

/*
 * Activate or stop (delta == 0) a TCP timer.
 */
void
tcp_timer_activate(struct tcpcb *tp, tt_which which, u_int delta)
{
	struct inpcb *inp = tptoinpcb(tp);
	sbintime_t precision;
	tt_what what;

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	INP_WLOCK_ASSERT(inp);

	if (delta > 0) {
		what = TT_STARTING;
		callout_when(tick_sbt * delta, 0, C_HARDCLOCK,
		    &tp->t_timers[which], &tp->t_precisions[which]);
	} else {
		what = TT_STOPPING;
		tp->t_timers[which] = SBT_MAX;
	}
	tcp_bblog_timer(tp, which, what, delta);

	if ((which = tcp_timer_next(tp, &precision)) != TT_N)
		callout_reset_sbt_on(&tp->t_callout, tp->t_timers[which],
		    precision, tcp_timer_enter, tp, inp_to_cpuid(inp),
		    C_ABSOLUTE);
	else
		callout_stop(&tp->t_callout);
}

bool
tcp_timer_active(struct tcpcb *tp, tt_which which)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	return (tp->t_timers[which] != SBT_MAX);
}
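/*
 * Typical usage of the two functions above from a TCP stack, e.g. when
 * arming the retransmit timer after sending data (an illustrative
 * sketch mirroring tcp_output(), not a definitive call site):
 *
 *	if (!tcp_timer_active(tp, TT_REXMT))
 *		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
 *
 * Passing delta == 0 to tcp_timer_activate() stops the given timer.
 */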
/*
 * Stop all timers associated with the tcpcb.
 *
 * Called only on tcpcb destruction.  The tcpcb shall already be dropped
 * from the pcb lookup database and the socket shall not be losing its
 * last reference.
 *
 * XXXGL: unfortunately our callout(9) is not able to fully stop a locked
 * callout even when only two threads are involved: the callout itself and
 * the thread that does callout_stop().  See where softclock_call_cc() swaps
 * the callwheel lock to the callout lock and then checks cc_exec_cancel().
 * This is the race window.  If it happens, tcp_timer_enter() won't be
 * executed, however the pcb lock will be locked and released, hence we
 * can't free memory.  Until callout(9) is improved, just keep retrying.
 * In my profiling I've seen such an event happening less than once per
 * hour with 20-30 Gbit/s of traffic.
 */
void
tcp_timer_stop(struct tcpcb *tp)
{
	struct inpcb *inp = tptoinpcb(tp);

	INP_WLOCK_ASSERT(inp);

	if (curthread->td_pflags & TDP_INTCPCALLOUT) {
		int stopped __diagused;

		stopped = callout_stop(&tp->t_callout);
		MPASS(stopped == 0);
	} else while (__predict_false(callout_stop(&tp->t_callout) == 0)) {
		INP_WUNLOCK(inp);
		kern_yield(PRI_UNCHANGED);
		INP_WLOCK(inp);
	}
}