/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
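
/*
 * The interval sysctls below are stored internally in ticks but are read
 * and written in milliseconds; sysctl_msec_to_ticks converts in both
 * directions.  A sketch of the arithmetic (illustrative only):
 *
 *	ticks = ms * hz / 1000;	 e.g. a write of 200 (ms) stores
 *				 200 ticks at hz = 1000, 20 at hz = 100.
 */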

int	tcp_persmin;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_persmin, 0, sysctl_msec_to_ticks, "I",
    "minimum persistence interval");

int	tcp_persmax;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_persmax, 0, sysctl_msec_to_ticks, "I",
    "maximum persistence interval");

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I",
    "time to establish connection");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I",
    "time before keepalive probes begin");

int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I",
    "time between keepalive probes");

int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

VNET_DEFINE(int, tcp_msl);
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(tcp_msl), 0, sysctl_msec_to_ticks, "I",
    "Maximum segment lifetime");

int	tcp_rexmit_initial;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_initial,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_initial, 0, sysctl_msec_to_ticks, "I",
    "Initial Retransmission Timeout");

int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

VNET_DEFINE(int, tcp_always_keepalive) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_VNET|CTLFLAG_RW,
    &VNET_NAME(tcp_always_keepalive), 0,
    "Assume SO_KEEPALIVE on all TCP connections");

int	tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

int	tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I",
    "FIN-WAIT2 timeout");

int	tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

/* max idle probes */
int	tcp_maxpersistidle;

int	tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
    &tcp_rexmit_drop_options, 0,
    "Drop TCP options from 3rd and later retransmitted SYN");

int	tcp_maxunacktime = TCPTV_MAXUNACKTIME;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxunacktime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_maxunacktime, 0, sysctl_msec_to_ticks, "I",
    "Maximum time (in ms) that a session can linger without making progress");

VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
    "Path MTU Discovery Black Hole Detection Enabled");

#ifdef INET
VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#endif

#ifdef INET6
VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
    "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif

#ifdef	RSS
static int	per_cpu_timers = 1;
#else
static int	per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

static int
sysctl_net_inet_tcp_retries(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_tcp_retries;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < 1) || (new > TCP_MAXRXTSHIFT))
			error = EINVAL;
		else
			V_tcp_retries = new;
	}
	return (error);
}

VNET_DEFINE(int, tcp_retries) = TCP_MAXRXTSHIFT;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, retries,
    CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_retries), 0, sysctl_net_inet_tcp_retries, "I",
    "maximum number of consecutive timer based retransmissions");
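
/*
 * Usage sketch (illustrative): an administrator can lower the
 * retransmission give-up point for the current vnet with
 *
 *	sysctl net.inet.tcp.retries=8
 *
 * The handler above rejects values outside [1, TCP_MAXRXTSHIFT].
 */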

/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

	if (per_cpu_timers) {
#ifdef	RSS
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
#endif
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	} else {
		return (0);
	}
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

int	tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */
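
/*
 * Sanity check of the constant above (pure arithmetic): the doubling
 * entries 1 + 2 + 4 + ... + 256 sum to 511, and the four capped entries
 * contribute 4 * 512 = 2048, giving 511 + 2048 = 2559.
 */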

/*
 * TCP timer processing.
 *
 * Each connection has 5 timers associated with it, which can be scheduled
 * simultaneously.  They all are serviced by one callout tcp_timer_enter().
 * This function executes the next timer via tcp_timersw[] vector.  Each
 * timer is supposed to return 'true' unless the connection was destroyed.
 * In the former case tcp_timer_enter() will schedule callout for next timer.
 */

typedef bool tcp_timer_t(struct tcpcb *);
static tcp_timer_t tcp_timer_delack;
static tcp_timer_t tcp_timer_2msl;
static tcp_timer_t tcp_timer_keep;
static tcp_timer_t tcp_timer_persist;
static tcp_timer_t tcp_timer_rexmt;

static tcp_timer_t * const tcp_timersw[TT_N] = {
	[TT_DELACK] = tcp_timer_delack,
	[TT_REXMT] = tcp_timer_rexmt,
	[TT_PERSIST] = tcp_timer_persist,
	[TT_KEEP] = tcp_timer_keep,
	[TT_2MSL] = tcp_timer_2msl,
};

/*
 * tcp_output_locked() is a timer-specific variation of a call to
 * tcp_output(); see tcp_var.h for the rest.  It handles a drop request
 * from advanced stacks, but keeps the tcpcb locked unless tcp_drop()
 * destroyed it.  Returns true if the tcpcb is valid and locked.
 */
static inline bool
tcp_output_locked(struct tcpcb *tp)
{
	int rv;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if ((rv = tp->t_fb->tfb_tcp_output(tp)) < 0) {
		KASSERT(tp->t_fb->tfb_flags & TCP_FUNC_OUTPUT_CANDROP,
		    ("TCP stack %s requested tcp_drop(%p)",
		    tp->t_fb->tfb_tcp_block_name, tp));
		tp = tcp_drop(tp, -rv);
	}

	return (tp != NULL);
}

static bool
tcp_timer_delack(struct tcpcb *tp)
{
	struct epoch_tracker et;
#if defined(INVARIANTS) || defined(VIMAGE)
	struct inpcb *inp = tptoinpcb(tp);
#endif
	bool rv;

	INP_WLOCK_ASSERT(inp);

	CURVNET_SET(inp->inp_vnet);
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	NET_EPOCH_ENTER(et);
	rv = tcp_output_locked(tp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);
}

static bool
tcp_timer_2msl(struct tcpcb *tp)
{
	struct inpcb *inp = tptoinpcb(tp);
	bool close = false;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	tcp_log_end_status(tp, TCP_EI_STATUS_2MSL);
	tcp_free_sackholes(tp);
	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, delete connection control block.  Otherwise, check
	 * again in a bit.
	 *
	 * If fast recycling of FIN_WAIT_2 sockets is enabled, we are in
	 * FIN_WAIT_2, and the receiver has closed, there's no point in
	 * hanging onto a FIN_WAIT_2 socket.  Just close it.  Ignore the
	 * fact that there were recent incoming segments.
	 *
	 * XXXGL: check if inp_socket shall always be !NULL here?
	 */
	if (tp->t_state == TCPS_TIME_WAIT) {
		close = true;
	} else if (tp->t_state == TCPS_FIN_WAIT_2 &&
	    tcp_fast_finwait2_recycle && inp->inp_socket &&
	    (inp->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		close = true;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp))
			tcp_timer_activate(tp, TT_2MSL, TP_KEEPINTVL(tp));
		else
			close = true;
	}
	if (close) {
		struct epoch_tracker et;

		NET_EPOCH_ENTER(et);
		tp = tcp_close(tp);
		NET_EPOCH_EXIT(et);
	}
	CURVNET_RESTORE();

	return (tp != NULL);
}
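
/*
 * Illustrative timing for the rescheduling above, assuming the stock
 * defaults (keepintvl = 75 s, keepcnt = 8): TP_MAXIDLE(tp) evaluates to
 * 8 * 75 = 600 s, so an idle FIN_WAIT_2 connection is rechecked every
 * 75 s and closed once it has been idle for more than ten minutes.
 */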

static bool
tcp_timer_keep(struct tcpcb *tp)
{
	struct epoch_tracker et;
	struct inpcb *inp = tptoinpcb(tp);
	struct tcptemp *t_template;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet.  If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			tcp_timer_activate(tp, TT_KEEP,
			    TP_KEEPIDLE(tp) - idletime);
			CURVNET_RESTORE();
			return (true);
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((V_tcp_always_keepalive ||
	    inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			NET_EPOCH_ENTER(et);
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			NET_EPOCH_EXIT(et);
			free(t_template, M_TEMP);
		}
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPINTVL(tp));
	} else
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	CURVNET_RESTORE();
	return (true);

dropit:
	TCPSTAT_INC(tcps_keepdrops);
	NET_EPOCH_ENTER(et);
	tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX);
	tp = tcp_drop(tp, ETIMEDOUT);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (tp != NULL);
}
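
/*
 * Illustrative keepalive timeline for the function above, assuming the
 * stock defaults (keepidle = 7200 s, keepintvl = 75 s, keepcnt = 8): the
 * first probe is sent after two hours of idleness, further probes follow
 * every 75 s, and the connection is dropped once the idle time exceeds
 * keepidle + keepcnt * keepintvl, i.e. 7200 s + 600 s.
 */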

/*
 * Has this session exceeded the maximum time without seeing a substantive
 * acknowledgement?  If so, return true; otherwise false.
 */
static bool
tcp_maxunacktime_check(struct tcpcb *tp)
{

	/* Are we tracking this timer for this session? */
	if (TP_MAXUNACKTIME(tp) == 0)
		return false;

	/* Do we have a current measurement? */
	if (tp->t_acktime == 0)
		return false;

	/* Are we within the acceptable range? */
	if (TSTMP_GT(TP_MAXUNACKTIME(tp) + tp->t_acktime, (u_int)ticks))
		return false;

	/* We exceeded the timer. */
	TCPSTAT_INC(tcps_progdrops);
	return true;
}

static bool
tcp_timer_persist(struct tcpcb *tp)
{
	struct epoch_tracker et;
#if defined(INVARIANTS) || defined(VIMAGE)
	struct inpcb *inp = tptoinpcb(tp);
#endif
	bool progdrop, rv;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 * Also, drop the connection if we haven't been making
	 * progress.
	 */
	progdrop = tcp_maxunacktime_check(tp);
	if (progdrop || (tp->t_rxtshift >= V_tcp_retries &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff))) {
		if (!progdrop)
			TCPSTAT_INC(tcps_persistdrop);
		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
		goto dropit;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX);
		goto dropit;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	NET_EPOCH_ENTER(et);
	if ((rv = tcp_output_locked(tp)))
		tp->t_flags &= ~TF_FORCEDATA;
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);

dropit:
	NET_EPOCH_ENTER(et);
	tp = tcp_drop(tp, ETIMEDOUT);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (tp != NULL);
}

static bool
tcp_timer_rexmt(struct tcpcb *tp)
{
	struct epoch_tracker et;
	struct inpcb *inp = tptoinpcb(tp);
	int rexmt;
	bool isipv6, rv;

	INP_WLOCK_ASSERT(inp);

	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	CURVNET_SET(inp->inp_vnet);
	tcp_free_sackholes(tp);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 *
	 * If we've either exceeded the maximum number of retransmissions,
	 * or we've gone long enough without making progress, then drop
	 * the session.
	 */
	if (++tp->t_rxtshift > V_tcp_retries || tcp_maxunacktime_check(tp)) {
		if (tp->t_rxtshift > V_tcp_retries)
			TCPSTAT_INC(tcps_timeoutdrop);
		tp->t_rxtshift = V_tcp_retries;
		tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN);
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		CURVNET_RESTORE();

		return (tp != NULL);
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * First retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
			tp->t_badrxtwin = ticks +
			    (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		/*
		 * In the event that we've negotiated timestamps,
		 * t_badrxtwin will be set to the value that we set
		 * the retransmitted packet's to_tsval to by tcp_output().
		 */
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    tp->t_rttmin, TCPTV_REXMTMAX);
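
	/*
	 * Worked example of the backoff above (illustrative): with a base
	 * TCP_REXMTVAL(tp) of 200 ms, successive timeouts arm the timer
	 * for 200, 400, 800, ... ms as tcp_backoff[] doubles, until
	 * TCPT_RANGESET() clamps t_rxtcur into [t_rttmin, TCPTV_REXMTMAX]
	 * (the latter being 64 s by default).
	 */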

	/*
	 * We enter the PLMTUD path if the connection is in the ESTABLISHED
	 * or FIN_WAIT_1 state.  The reason for including FIN_WAIT_1 is that
	 * if the amount of data we send is very small, we could send it in
	 * a couple of packets and proceed straight to FIN; in that case we
	 * won't catch the ESTABLISHED state.
	 */
#ifdef INET6
	isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
#else
	isipv6 = false;
#endif
	if (((V_tcp_pmtud_blackhole_detect == 1) ||
	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
	    ((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_FIN_WAIT_1))) {
		if (tp->t_rxtshift == 1) {
			/*
			 * We enter blackhole detection after the first
			 * unsuccessful timer based retransmission.
			 * Then we try up to two MSS reductions, each
			 * candidate getting two retransmission tries.
			 * A candidate is given its two tries only if it
			 * would actually reduce the MSS.
			 */
			tp->t_blackhole_enter = 2;
			tp->t_blackhole_exit = tp->t_blackhole_enter;
			if (isipv6) {
#ifdef INET6
				if (tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_v6mssdflt &&
				    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			} else {
#ifdef INET
				if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_mssdflt &&
				    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			}
		}
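
		/*
		 * Illustrative schedule, assuming an IPv4 connection whose
		 * MSS starts above both thresholds: t_blackhole_enter = 2
		 * and t_blackhole_exit = 6, so the MSS is clamped to
		 * V_tcp_pmtud_blackhole_mss on the 2nd timeout and to
		 * V_tcp_mssdflt on the 4th; once t_rxtshift reaches 6 the
		 * saved MSS is restored and detection gives up.
		 */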

		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= tp->t_blackhole_enter &&
		    tp->t_rxtshift < tp->t_blackhole_exit &&
		    (tp->t_rxtshift - tp->t_blackhole_enter) % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to lower value than what we
			 *   negotiated with peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to blackhole value or to the default
			 * in an attempt to retransmit.
			 */
#ifdef INET6
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss &&
			    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss &&
			    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(&tp->t_ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, maybe this isn't a blackhole and
			 * we restore the previous MSS and blackhole detection
			 * flags.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= tp->t_blackhole_exit)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(&tp->t_ccv);
			}
		}
	}

	/*
	 * Disable RFC1323 and SACK if we haven't got any response to
	 * our third SYN to work around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're having
	 * connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((inp->inp_vflag & INP_IPV6) != 0)
			in6_losing(inp);
		else
#endif
			in_losing(inp);
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	cc_cong_signal(tp, NULL, CC_RTO);
	NET_EPOCH_ENTER(et);
	rv = tcp_output_locked(tp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();

	return (rv);
}
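
/*
 * Example of the tick-to-millisecond rounding used below (illustrative,
 * with an assumed hz = 128): a delta of 1 tick converts as
 * (1 * 1000 + 127) / 128 = 8, i.e. 7.8125 ms rounded up to the next
 * full millisecond.
 */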

static void
tcp_bblog_timer(struct tcpcb *tp, tt_which which, tt_what what, uint32_t ticks)
{
	struct tcp_log_buffer *lgb;
	uint64_t ms;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tcp_bblogging_on(tp))
		lgb = tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0,
		    NULL, false, NULL, NULL, 0, NULL);
	else
		lgb = NULL;
	if (lgb != NULL) {
		lgb->tlb_flex1 = (what << 8) | which;
		if (what == TT_STARTING) {
			/* Convert ticks to ms and store it in tlb_flex2. */
			if (hz == 1000)
				lgb->tlb_flex2 = ticks;
			else {
				ms = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
				if (ms > UINT32_MAX)
					lgb->tlb_flex2 = UINT32_MAX;
				else
					lgb->tlb_flex2 = (uint32_t)ms;
			}
		}
	}
}

static inline tt_which
tcp_timer_next(struct tcpcb *tp, sbintime_t *precision)
{
	tt_which i, rv;
	sbintime_t after, before;

	for (i = 0, rv = TT_N, after = before = SBT_MAX; i < TT_N; i++) {
		if (tp->t_timers[i] < after) {
			after = tp->t_timers[i];
			rv = i;
		}
		before = MIN(before, tp->t_timers[i] + tp->t_precisions[i]);
	}
	if (precision != NULL)
		*precision = before - after;

	return (rv);
}

static void
tcp_timer_enter(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp = tptoinpcb(tp);
	sbintime_t precision;
	tt_which which;
	bool tp_valid;

	INP_WLOCK_ASSERT(inp);
	MPASS((curthread->td_pflags & TDP_INTCPCALLOUT) == 0);

	curthread->td_pflags |= TDP_INTCPCALLOUT;

	which = tcp_timer_next(tp, NULL);
	MPASS(which < TT_N);
	tp->t_timers[which] = SBT_MAX;
	tp->t_precisions[which] = 0;

	tcp_bblog_timer(tp, which, TT_PROCESSING, 0);
	tp_valid = tcp_timersw[which](tp);
	if (tp_valid) {
		tcp_bblog_timer(tp, which, TT_PROCESSED, 0);
		if ((which = tcp_timer_next(tp, &precision)) != TT_N) {
			callout_reset_sbt_on(&tp->t_callout,
			    tp->t_timers[which], precision, tcp_timer_enter,
			    tp, inp_to_cpuid(inp), C_ABSOLUTE);
		}
		INP_WUNLOCK(inp);
	}

	curthread->td_pflags &= ~TDP_INTCPCALLOUT;
}

/*
 * Activate or stop (delta == 0) a TCP timer.
 */
void
tcp_timer_activate(struct tcpcb *tp, tt_which which, u_int delta)
{
	struct inpcb *inp = tptoinpcb(tp);
	sbintime_t precision;
	tt_what what;

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	INP_WLOCK_ASSERT(inp);

	if (delta > 0) {
		what = TT_STARTING;
		callout_when(tick_sbt * delta, 0, C_HARDCLOCK,
		    &tp->t_timers[which], &tp->t_precisions[which]);
	} else {
		what = TT_STOPPING;
		tp->t_timers[which] = SBT_MAX;
	}
	tcp_bblog_timer(tp, which, what, delta);

	if ((which = tcp_timer_next(tp, &precision)) != TT_N)
		callout_reset_sbt_on(&tp->t_callout, tp->t_timers[which],
		    precision, tcp_timer_enter, tp, inp_to_cpuid(inp),
		    C_ABSOLUTE);
	else
		callout_stop(&tp->t_callout);
}

bool
tcp_timer_active(struct tcpcb *tp, tt_which which)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	return (tp->t_timers[which] != SBT_MAX);
}
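
/*
 * Usage sketch (illustrative; the real call sites live elsewhere in the
 * stack): arm the retransmit timer for the current RTO, or stop it by
 * passing a delta of 0:
 *
 *	tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
 *	...
 *	if (tcp_timer_active(tp, TT_REXMT))
 *		tcp_timer_activate(tp, TT_REXMT, 0);
 */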

/*
 * Stop all timers associated with tcpcb.
 *
 * Called only on tcpcb destruction.  The tcpcb shall already be dropped
 * from the pcb lookup database and the socket shall not be losing its
 * last reference.
 *
 * XXXGL: unfortunately our callout(9) is not able to fully stop a locked
 * callout even when only two threads are involved: the callout itself and
 * the thread that does callout_stop().  See where softclock_call_cc() swaps
 * the callwheel lock for the callout lock and then checks cc_exec_cancel().
 * This is the race window.  If it happens, tcp_timer_enter() won't be
 * executed, however the pcb lock will be locked and released, hence we
 * can't free memory.  Until callout(9) is improved, just keep retrying.
 * In my profiling I've seen such an event happening less than once per
 * hour with 20-30 Gbit/s of traffic.
 */
void
tcp_timer_stop(struct tcpcb *tp)
{
	struct inpcb *inp = tptoinpcb(tp);

	INP_WLOCK_ASSERT(inp);

	if (curthread->td_pflags & TDP_INTCPCALLOUT) {
		int stopped __diagused;

		stopped = callout_stop(&tp->t_callout);
		MPASS(stopped == 0);
	} else while (__predict_false(callout_stop(&tp->t_callout) == 0)) {
		INP_WUNLOCK(inp);
		kern_yield(PRI_UNCHANGED);
		INP_WLOCK(inp);
	}
}