/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_seq.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

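/*
 * Tunable timer parameters.  Values are supplied in milliseconds via
 * sysctl and converted to kernel ticks by sysctl_msec_to_ticks().
 * Entries marked CTLFLAG_VNET are maintained per-vnet; the rest are
 * global.
 */
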
"time to establish connection"); 101 102 int tcp_keepidle; 103 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, 104 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 105 &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", 106 "time before keepalive probes begin"); 107 108 int tcp_keepintvl; 109 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, 110 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 111 &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", 112 "time between keepalive probes"); 113 114 int tcp_delacktime; 115 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, 116 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 117 &tcp_delacktime, 0, sysctl_msec_to_ticks, "I", 118 "Time before a delayed ACK is sent"); 119 120 VNET_DEFINE(int, tcp_msl); 121 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, 122 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET, 123 &VNET_NAME(tcp_msl), 0, sysctl_msec_to_ticks, "I", 124 "Maximum segment lifetime"); 125 126 int tcp_rexmit_initial; 127 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_initial, 128 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 129 &tcp_rexmit_initial, 0, sysctl_msec_to_ticks, "I", 130 "Initial Retransmission Timeout"); 131 132 int tcp_rexmit_min; 133 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, 134 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 135 &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I", 136 "Minimum Retransmission Timeout"); 137 138 int tcp_rexmit_slop; 139 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, 140 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 141 &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I", 142 "Retransmission Timer Slop"); 143 144 VNET_DEFINE(int, tcp_always_keepalive) = 1; 145 SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_VNET|CTLFLAG_RW, 146 &VNET_NAME(tcp_always_keepalive) , 0, 147 "Assume SO_KEEPALIVE on all TCP connections"); 148 149 int tcp_fast_finwait2_recycle = 0; 150 SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW, 151 &tcp_fast_finwait2_recycle, 0, 152 "Recycle closed FIN_WAIT_2 connections faster"); 153 154 int tcp_finwait2_timeout; 155 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, 156 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 157 &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", 158 "FIN-WAIT2 timeout"); 159 160 int tcp_keepcnt = TCPTV_KEEPCNT; 161 SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0, 162 "Number of keepalive probes to send"); 163 164 /* max idle probes */ 165 int tcp_maxpersistidle; 166 167 int tcp_rexmit_drop_options = 0; 168 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW, 169 &tcp_rexmit_drop_options, 0, 170 "Drop TCP options from 3rd and later retransmitted SYN"); 171 172 VNET_DEFINE(int, tcp_pmtud_blackhole_detect); 173 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection, 174 CTLFLAG_RW|CTLFLAG_VNET, 175 &VNET_NAME(tcp_pmtud_blackhole_detect), 0, 176 "Path MTU Discovery Black Hole Detection Enabled"); 177 178 #ifdef INET 179 VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200; 180 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss, 181 CTLFLAG_RW|CTLFLAG_VNET, 182 &VNET_NAME(tcp_pmtud_blackhole_mss), 0, 183 "Path MTU Discovery Black Hole Detection lowered MSS"); 184 #endif 185 186 #ifdef INET6 187 VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220; 188 SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss, 189 CTLFLAG_RW|CTLFLAG_VNET, 190 &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0, 191 "Path MTU Discovery IPv6 Black Hole Detection lowered MSS"); 192 #endif 193 194 #ifdef RSS 195 static int 
#ifdef RSS
static int	per_cpu_timers = 1;
#else
static int	per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

	if (per_cpu_timers) {
#ifdef RSS
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
#endif
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	} else {
		return (0);
	}
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates timestamps used by TCP and causes finite state machine
 * actions if timers expire.
 */
void
tcp_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		(void) tcp_tw_2msl_scan(0);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

int	tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */

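/*
 * Worked example, assuming a connection whose current RTO
 * (TCP_REXMTVAL) stays at 1 second: retransmissions go out roughly
 * 1, 2, 4, ..., 256, 512, 512, ... seconds apart, each interval
 * clamped to TCPTV_REXMTMAX, and tcp_timer_rexmt() drops the
 * connection once t_rxtshift exceeds TCP_MAXRXTSHIFT (12).
 */
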
/*
 * TCP timer processing.
 */

void
tcp_timer_delack(void *xtp)
{
	struct epoch_tracker et;
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);

	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_delack) ||
	    !callout_active(&tp->t_timers->tt_delack)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_delack);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	NET_EPOCH_ENTER(et);
	(void) tcp_output_unlock(tp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
}

void
tcp_inpinfo_lock_del(struct inpcb *inp, struct tcpcb *tp)
{
	if (inp && tp != NULL)
		INP_WUNLOCK(inp);
}

void
tcp_timer_2msl(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	tcp_free_sackholes(tp);
	if (callout_pending(&tp->t_timers->tt_2msl) ||
	    !callout_active(&tp->t_timers->tt_2msl)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_2msl);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * The 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for the peer to close and the connection has been
	 * idle too long, delete the connection control block.  Otherwise,
	 * check again in a bit.
	 *
	 * If fast FIN_WAIT_2 recycling is enabled, we are in FIN_WAIT_2,
	 * and the receiver has closed, there's no point in hanging onto
	 * the FIN_WAIT_2 socket: just close it, ignoring any recent
	 * incoming segments.
	 */
	if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
	    tp->t_inpcb && tp->t_inpcb->inp_socket &&
	    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		NET_EPOCH_ENTER(et);
		tp = tcp_close(tp);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_2msl,
			    TP_KEEPINTVL(tp), tcp_timer_2msl, tp);
		} else {
			NET_EPOCH_ENTER(et);
			tp = tcp_close(tp);
			NET_EPOCH_EXIT(et);
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
	}

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);

	if (tp != NULL)
		INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

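/*
 * Note that connections in TIME_WAIT proper never reach the body of
 * tcp_timer_2msl() above: the INP_TIMEWAIT check returns early, and
 * such connections are instead reaped by tcp_tw_2msl_scan() invoked
 * from tcp_slowtimo().  This callout's main job is the FIN_WAIT_2
 * idle drop.
 */
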
void
tcp_timer_keep(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_keep) ||
	    !callout_active(&tp->t_timers->tt_keep)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_keep);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));

	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet.  If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_keep,
			    TP_KEEPIDLE(tp) - idletime, tcp_timer_keep, tp);
			INP_WUNLOCK(inp);
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((V_tcp_always_keepalive ||
	    inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			NET_EPOCH_ENTER(et);
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			NET_EPOCH_EXIT(et);
			free(t_template, M_TEMP);
		}
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
		    tcp_timer_keep, tp);
	} else
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
		    tcp_timer_keep, tp);

#ifdef TCPDEBUG
	if (inp->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return;

dropit:
	TCPSTAT_INC(tcps_keepdrops);
	NET_EPOCH_ENTER(et);
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	NET_EPOCH_EXIT(et);
	tcp_inpinfo_lock_del(inp, tp);
	CURVNET_RESTORE();
}

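/*
 * The probe sent by tcp_timer_keep() above is a zero-length segment
 * with SEG.SEQ = SND.UNA - 1.  Per RFC 1122 (4.2.3.6) a live peer
 * must ACK it, a rebooted peer answers with RST, and continued
 * silence takes the dropit path once the connection has been idle
 * for TP_KEEPIDLE plus TP_MAXIDLE, i.e. after roughly tcp_keepcnt
 * unanswered probes.
 */
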
void
tcp_timer_persist(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	struct epoch_tracker et;
	int outrv;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_persist) ||
	    !callout_active(&tp->t_timers->tt_persist)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_persist);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * The persistence timer went off while the send window is zero.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
		TCPSTAT_INC(tcps_persistdrop);
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	NET_EPOCH_ENTER(et);
	outrv = tcp_output_nodrop(tp);
	tp->t_flags &= ~TF_FORCEDATA;

#ifdef TCPDEBUG
	if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	(void) tcp_unlock_or_drop(tp, outrv);
	NET_EPOCH_EXIT(et);
out:
	CURVNET_RESTORE();
}

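/*
 * The next probe interval programmed by tcp_setpersist() above is
 * derived from the smoothed RTT with exponential backoff, clamped to
 * the [tcp_persmin, tcp_persmax] range exposed by the sysctls at the
 * top of this file.
 */
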
void
tcp_timer_rexmt(void *xtp)
{
	struct tcpcb *tp = xtp;
	CURVNET_SET(tp->t_vnet);
	int rexmt, outrv;
	struct inpcb *inp;
	struct epoch_tracker et;
	bool isipv6;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_rexmt) ||
	    !callout_active(&tp->t_timers->tt_rexmt)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_rexmt);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	tcp_free_sackholes(tp);
	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0, NULL, false);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		TCPSTAT_INC(tcps_timeoutdrop);
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate that the CWND is
		 * to be limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * First retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
			tp->t_badrxtwin = ticks +
			    (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		/*
		 * If we have negotiated timestamps, badrxtwin is instead
		 * set by tcp_output() to the to_tsval that it places in
		 * the retransmitted packet.
		 */
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    tp->t_rttmin, TCPTV_REXMTMAX);

	/*
	 * We enter the path for PLMTUD if the connection is established
	 * or in FIN_WAIT_1.  The latter is included because, if the
	 * amount of data we send is very small, we could send it in a
	 * couple of packets and proceed straight to FIN; in that case
	 * we would never observe the ESTABLISHED state here.
	 */
#ifdef INET6
	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
#else
	isipv6 = false;
#endif
	if (((V_tcp_pmtud_blackhole_detect == 1) ||
	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
	    ((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_FIN_WAIT_1))) {
		if (tp->t_rxtshift == 1) {
			/*
			 * We enter blackhole detection after the first
			 * unsuccessful timer-based retransmission.  From
			 * there we may lower the MSS up to two times, and
			 * each candidate value gets two retransmission
			 * tries, but only if it actually reduces the MSS.
			 */
			tp->t_blackhole_enter = 2;
			tp->t_blackhole_exit = tp->t_blackhole_enter;
			if (isipv6) {
#ifdef INET6
				if (tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_v6mssdflt &&
				    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			} else {
#ifdef INET
				if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_mssdflt &&
				    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			}
		}
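		/*
		 * Worked example (IPv4, default settings, values not from
		 * this file): with t_maxseg 1460, pmtud_blackhole_mss 1200
		 * and tcp_mssdflt 536, the window above becomes
		 * t_blackhole_enter = 2 and t_blackhole_exit = 6: timeouts
		 * 2 and 3 are retried with a 1200 byte MSS, timeouts 4 and
		 * 5 with 536 bytes, and the saved MSS is restored at
		 * timeout 6 below.
		 */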
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= tp->t_blackhole_enter &&
		    tp->t_rxtshift < tp->t_blackhole_exit &&
		    (tp->t_rxtshift - tp->t_blackhole_enter) % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to a lower value than what we
			 *   negotiated with the peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to the blackhole value or to the
			 * default in an attempt to retransmit.
			 */
#ifdef INET6
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss &&
			    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss &&
			    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(tp->ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, this may not be a blackhole
			 * after all, so restore the previous MSS and the
			 * blackhole detection flags.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= tp->t_blackhole_exit)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(tp->ccv);
			}
		}
	}

	/*
	 * Disable RFC 1323 and SACK if we haven't got any response to
	 * our third SYN, to work around broken terminal servers (most of
	 * which have hopefully been retired) whose bad VJ header
	 * compression code trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're having
	 * connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
			in6_losing(tp->t_inpcb);
		else
#endif
			in_losing(tp->t_inpcb);
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	cc_cong_signal(tp, NULL, CC_RTO);
	NET_EPOCH_ENTER(et);
	outrv = tcp_output_nodrop(tp);
#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	(void) tcp_unlock_or_drop(tp, outrv);
	NET_EPOCH_EXIT(et);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta)
{
	struct callout *t_callout;
	callout_func_t *f_callout;
	struct inpcb *inp = tp->t_inpcb;
	int cpu = inp_to_cpuid(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	if (tp->t_timers->tt_flags & TT_STOPPED)
		return;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		f_callout = tcp_timer_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		f_callout = tcp_timer_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		f_callout = tcp_timer_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		f_callout = tcp_timer_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		f_callout = tcp_timer_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_activate) {
			tp->t_fb->tfb_tcp_timer_activate(tp, timer_type, delta);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	if (delta == 0) {
		callout_stop(t_callout);
	} else {
		callout_reset_on(t_callout, delta, f_callout, tp, cpu);
	}
}

int
tcp_timer_active(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_active) {
			return (tp->t_fb->tfb_tcp_timer_active(tp, timer_type));
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	return callout_active(t_callout);
}

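/*
 * Typical caller pattern (a sketch, not code from this file): the
 * input path stops or restarts the retransmit timer as ACKs arrive.
 * A delta of 0 stops the callout, any other value (re)arms it:
 *
 *	if (SEQ_GT(tp->snd_max, tp->snd_una))
 *		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
 *	else
 *		tcp_timer_activate(tp, TT_REXMT, 0);
 */
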
/*
 * Stop the timer from running and set a flag in tt_flags that keeps
 * it from being restarted.  The flag is needed to ensure that a race
 * does not leave the timer running and let it restart itself (the
 * keep and persist timers especially do this).
 */
int
tcp_timer_suspend(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;
	uint32_t t_flags;

	switch (timer_type) {
	case TT_DELACK:
		t_flags = TT_DELACK_SUS;
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_flags = TT_REXMT_SUS;
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_flags = TT_PERSIST_SUS;
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_flags = TT_KEEP_SUS;
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_flags = TT_2MSL_SUS;
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		panic("tp:%p bad timer_type 0x%x", tp, timer_type);
	}
	tp->t_timers->tt_flags |= t_flags;
	return (callout_stop(t_callout));
}

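/*
 * tcp_timer_suspend() above and tcp_timers_unsuspend() below are
 * intended to be used as a pair, e.g. while a connection is being
 * moved from one TCP function block to another, so that a timer
 * firing mid-switch cannot re-arm itself in between.
 */
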
void
tcp_timers_unsuspend(struct tcpcb *tp, uint32_t timer_type)
{
	switch (timer_type) {
	case TT_DELACK:
		if (tp->t_timers->tt_flags & TT_DELACK_SUS) {
			tp->t_timers->tt_flags &= ~TT_DELACK_SUS;
			if (tp->t_flags & TF_DELACK) {
				/* A delayed ACK is pending; activate the timer. */
				tp->t_flags &= ~TF_DELACK;
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			}
		}
		break;
	case TT_REXMT:
		if (tp->t_timers->tt_flags & TT_REXMT_SUS) {
			tp->t_timers->tt_flags &= ~TT_REXMT_SUS;
			if (SEQ_GT(tp->snd_max, tp->snd_una) &&
			    (tcp_timer_active((tp), TT_PERSIST) == 0) &&
			    tp->snd_wnd) {
				/* We have outstanding data; activate the timer. */
				tcp_timer_activate(tp, TT_REXMT,
				    tp->t_rxtcur);
			}
		}
		break;
	case TT_PERSIST:
		if (tp->t_timers->tt_flags & TT_PERSIST_SUS) {
			tp->t_timers->tt_flags &= ~TT_PERSIST_SUS;
			if (tp->snd_wnd == 0) {
				/* Activate the persist timer. */
				tp->t_rxtshift = 0;
				tcp_setpersist(tp);
			}
		}
		break;
	case TT_KEEP:
		if (tp->t_timers->tt_flags & TT_KEEP_SUS) {
			tp->t_timers->tt_flags &= ~TT_KEEP_SUS;
			tcp_timer_activate(tp, TT_KEEP,
			    TCPS_HAVEESTABLISHED(tp->t_state) ?
			    TP_KEEPIDLE(tp) : TP_KEEPINIT(tp));
		}
		break;
	case TT_2MSL:
		/* Note: the original tested "tt_flags &= TT_2MSL_SUS" here,
		 * which clobbered all the other suspend flags; a plain
		 * bitwise test is intended, matching the cases above. */
		if (tp->t_timers->tt_flags & TT_2MSL_SUS) {
			tp->t_timers->tt_flags &= ~TT_2MSL_SUS;
			if ((tp->t_state == TCPS_FIN_WAIT_2) &&
			    ((tp->t_inpcb->inp_socket == NULL) ||
			    (tp->t_inpcb->inp_socket->so_rcv.sb_state &
			    SBS_CANTRCVMORE))) {
				/* Start the 2MSL timer. */
				tcp_timer_activate(tp, TT_2MSL,
				    (tcp_fast_finwait2_recycle) ?
				    tcp_finwait2_timeout : TP_MAXIDLE(tp));
			}
		}
		break;
	default:
		panic("tp:%p bad timer_type 0x%x", tp, timer_type);
	}
}

static void
tcp_timer_discard(void *ptp)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	struct epoch_tracker et;

	tp = (struct tcpcb *)ptp;
	CURVNET_SET(tp->t_vnet);
	NET_EPOCH_ENTER(et);
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL",
	    __func__, tp));
	INP_WLOCK(inp);
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) != 0,
	    ("%s: tcpcb has to be stopped here", __func__));
	if (--tp->t_timers->tt_draincnt > 0 ||
	    tcp_freecb(tp) == false)
		INP_WUNLOCK(inp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
}

void
tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	tp->t_timers->tt_flags |= TT_STOPPED;
	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_stop) {
			/*
			 * XXXrrs we need to look at this with the
			 * stop case below (flags).
			 */
			tp->t_fb->tfb_tcp_timer_stop(tp, timer_type);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}

	if (callout_async_drain(t_callout, tcp_timer_discard) == 0) {
		/*
		 * Can't stop the callout; defer the actual tcpcb deletion
		 * to the last timer to drain.  We do this using the async
		 * drain function and the count kept in tt_draincnt.
		 */
		tp->t_timers->tt_draincnt++;
	}
}