/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

int	tcp_persmin;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_persmin, 0, sysctl_msec_to_ticks, "I",
    "minimum persistence interval");

int	tcp_persmax;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_persmax, 0, sysctl_msec_to_ticks, "I",
    "maximum persistence interval");

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I",
    "time to establish connection");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I",
    "time before keepalive probes begin");
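/*
 * Editorial note (not in the original file): the SYSCTL_PROC handlers
 * above store intervals internally in clock ticks while accepting and
 * reporting milliseconds.  A minimal sketch of the conversion performed
 * by sysctl_msec_to_ticks(), assuming the global tick rate `hz`:
 *
 *	ticks = msec * hz / 1000;
 *
 * so, e.g., `sysctl net.inet.tcp.keepidle=7200000` requests a two-hour
 * idle period independent of the kernel's hz setting.
 */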
int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I",
    "time between keepalive probes");

int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

int	tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

int	tcp_always_keepalive = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
    &tcp_always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");

int	tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

int	tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", "FIN-WAIT2 timeout");

int	tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

/* max idle probes */
int	tcp_maxpersistidle;

static int	tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
    &tcp_rexmit_drop_options, 0,
    "Drop TCP options from 3rd and later retransmitted SYN");

VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
    "Path MTU Discovery Black Hole Detection Enabled");

#ifdef INET
VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#endif

#ifdef INET6
VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
    "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif

#ifdef RSS
static int	per_cpu_timers = 1;
#else
static int	per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

#if 0
#define	INP_CPU(inp)	(per_cpu_timers ? (!CPU_ABSENT(((inp)->inp_flowid % (mp_maxid+1))) ? \
		((inp)->inp_flowid % (mp_maxid+1)) : curcpu) : 0)
#endif
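/*
 * Worked example (editorial, not in the original): with the classic BSD
 * defaults (keepidle 2 h, keepintvl 75 s, TCPTV_KEEPCNT 8), an unreachable
 * peer is declared dead by tcp_timer_keep() after
 *
 *	TP_KEEPIDLE + tcp_keepcnt * TP_KEEPINTVL = 7200 s + 8 * 75 s
 *						 = 7800 s (2 h 10 min)
 *
 * of silence; TP_MAXIDLE() (see tcp_timer.h) is the keepcnt * keepintvl
 * probe budget without the idle prefix.
 */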
/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
static inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

#ifdef RSS
	if (per_cpu_timers) {
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
	}
#else
	/* Legacy, pre-RSS behaviour */
	if (per_cpu_timers) {
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	}
#endif
	/* Default for RSS and non-RSS - cpuid 0 */
	return (0);
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates timestamps used for TCP and causes finite state machine
 * actions if timers expire.
 */
void
tcp_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		(void) tcp_tw_2msl_scan(0);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

int	tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

static int tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 */

void
tcp_timer_delack(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);

	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_delack) ||
	    !callout_active(&tp->t_timers->tt_delack)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_delack);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	(void) tp->t_fb->tfb_tcp_output(tp);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
}
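/*
 * Editorial note (not in the original): tcp_timer_delack() above and the
 * remaining timer handlers all open with the same guard against racing a
 * concurrent callout_reset()/callout_stop() done under the inp lock:
 *
 *	if (callout_pending(c) ||	// rescheduled while we waited
 *	    !callout_active(c)) {	// stopped while we waited
 *		unlock and return;	// stale expiry, do nothing
 *	}
 *	callout_deactivate(c);		// claim this expiry
 *
 * Only after this guard may the handler act on the tcpcb.
 */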
/*
 * When a timer wants to remove a TCB it must
 * hold the INP_INFO_RLOCK().  The timer function
 * should only have grabbed the INP_WLOCK() when
 * it entered.  To safely switch to holding both the
 * INP_INFO_RLOCK() and the INP_WLOCK() we must first
 * grab a reference on the inp, which will hold the inp
 * so that it can't be removed.  We then unlock the INP_WLOCK(),
 * and grab the INP_INFO_RLOCK() lock.  Once we have the INP_INFO_RLOCK()
 * we proceed again to get the INP_WLOCK() (this preserves proper
 * lock order).  After acquiring the INP_WLOCK we must check if someone
 * else deleted the pcb, i.e. the inp_flags check.
 * If so we return 1, otherwise we return 0.
 *
 * No matter what the tcp_inpinfo_lock_add() function
 * returns, the caller must afterwards call tcp_inpinfo_lock_del()
 * to drop the locks and reference properly.
 */

int
tcp_inpinfo_lock_add(struct inpcb *inp)
{
	in_pcbref(inp);
	INP_WUNLOCK(inp);
	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		return (1);
	}
	return (0);
}

void
tcp_inpinfo_lock_del(struct inpcb *inp, struct tcpcb *tp)
{
	INP_INFO_RUNLOCK(&V_tcbinfo);
	if (inp && (tp == NULL)) {
		/*
		 * If tcp_close/drop() gets called and tp
		 * returns NULL, then the function dropped
		 * the inp lock; we hold a reference keeping
		 * this around, so we must re-acquire the
		 * INP_WLOCK() in order to proceed with
		 * dropping the inp reference.
		 */
		INP_WLOCK(inp);
	}
	if (inp && in_pcbrele_wlocked(inp) == 0)
		INP_WUNLOCK(inp);
}

void
tcp_timer_2msl(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	tcp_free_sackholes(tp);
	if (callout_pending(&tp->t_timers->tt_2msl) ||
	    !callout_active(&tp->t_timers->tt_2msl)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_2msl);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * The 2 MSL shutdown timeout went off.  If we're closed but
	 * still waiting for the peer to close and the connection has
	 * been idle too long, delete the connection control block.
	 * Otherwise, check again in a bit.
	 *
	 * If in TIME_WAIT state, just ignore this timeout; it is handled
	 * in tcp_tw_2msl_scan().
	 *
	 * If fast recycling of FIN_WAIT_2 connections is enabled, we are
	 * in FIN_WAIT_2, and the receiver has closed, there's no point in
	 * hanging onto the FIN_WAIT_2 socket.  Just close it, ignoring the
	 * fact that there were recent incoming segments.
	 */
	if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
	    tp->t_inpcb && tp->t_inpcb->inp_socket &&
	    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_close(tp);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_2msl,
			    TP_KEEPINTVL(tp), tcp_timer_2msl, tp);
		} else {
			if (tcp_inpinfo_lock_add(inp)) {
				tcp_inpinfo_lock_del(inp, tp);
				goto out;
			}
			tp = tcp_close(tp);
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
	}

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);

	if (tp != NULL)
		INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}
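/*
 * Editorial summary (not in the original) of tcp_timer_2msl() dispositions:
 *
 *	TIME_WAIT			ignored; tcp_tw_2msl_scan() handles it
 *	FIN_WAIT_2 + fast recycle,
 *	    peer can't receive		tcp_close() immediately
 *	idle <= TP_MAXIDLE(tp)		re-arm for TP_KEEPINTVL(tp)
 *	otherwise			tcp_close()
 */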
void
tcp_timer_keep(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_keep) ||
	    !callout_active(&tp->t_timers->tt_keep)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_keep);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));

	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet.  If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_keep,
			    TP_KEEPIDLE(tp) - idletime, tcp_timer_keep, tp);
			INP_WUNLOCK(inp);
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((tcp_always_keepalive ||
	    inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			free(t_template, M_TEMP);
		}
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
		    tcp_timer_keep, tp);
	} else
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
		    tcp_timer_keep, tp);

#ifdef TCPDEBUG
	if (inp->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return;

dropit:
	TCPSTAT_INC(tcps_keepdrops);

	if (tcp_inpinfo_lock_add(inp)) {
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	tcp_inpinfo_lock_del(inp, tp);
out:
	CURVNET_RESTORE();
}
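#if 0
/*
 * Illustrative userland sketch (editorial, not part of this file): the
 * per-connection values consulted by TP_KEEPIDLE()/TP_KEEPINTVL()/
 * TP_KEEPCNT() can be overridden per socket with the TCP-level socket
 * options from <netinet/tcp.h>; values are in seconds.  `s` is assumed
 * to be a connected TCP socket.
 */
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
#endif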
void
tcp_timer_persist(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_persist) ||
	    !callout_active(&tp->t_timers->tt_persist)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_persist);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
		TCPSTAT_INC(tcps_persistdrop);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_drop(tp, ETIMEDOUT);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_drop(tp, ETIMEDOUT);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->t_flags &= ~TF_FORCEDATA;

#ifdef TCPDEBUG
	if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}
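/*
 * Editorial note (not in the original): the window probe is forced by
 * wrapping the tfb_tcp_output() call above in TF_FORCEDATA, which lets
 * the stack transmit one byte beyond a zero receive window.  The probe
 * cadence itself is computed by tcp_setpersist(), which applies the same
 * tcp_backoff[] shifts as the retransmit timer, clamped to the
 * [tcp_persmin, tcp_persmax] range exposed by the sysctls above.
 */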
void
tcp_timer_rexmt(void *xtp)
{
	struct tcpcb *tp = xtp;
	CURVNET_SET(tp->t_vnet);
	int rexmt;
	struct inpcb *inp;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_rexmt) ||
	    !callout_active(&tp->t_timers->tt_rexmt)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_rexmt);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	tcp_free_sackholes(tp);
	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0, NULL, false);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		TCPSTAT_INC(tcps_timeoutdrop);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_drop(tp, ETIMEDOUT);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * First retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
			tp->t_badrxtwin = ticks +
			    (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		/*
		 * In the event that we've negotiated timestamps,
		 * badrxtwin will be set to the value that we set
		 * the retransmitted packet's to_tsval to in tcp_output().
		 */
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = TCPTV_RTOBASE * tcp_syn_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    tp->t_rttmin, TCPTV_REXMTMAX);
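	/*
	 * Editorial worked example (not in the original): with a smoothed
	 * RTO of 200 ms, the fifth data retransmit (t_rxtshift == 5) waits
	 * roughly
	 *
	 *	rexmt = TCP_REXMTVAL(tp) * tcp_backoff[5] = 200 ms * 32 = 6.4 s
	 *
	 * before TCPT_RANGESET() clamps the result into
	 * [t_rttmin, TCPTV_REXMTMAX], so the exponential backoff stops
	 * growing once the ceiling is reached.
	 */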
	/*
	 * We enter the PLMTUD path if the connection is in ESTABLISHED or
	 * FIN_WAIT_1 state.  The reason for including FIN_WAIT_1 is that
	 * if the amount of data we send is very small, we could send it in
	 * a couple of packets and proceed straight to FIN; in that case we
	 * would never observe the ESTABLISHED state.
	 */
	if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED))
	    || (tp->t_state == TCPS_FIN_WAIT_1))) {
#ifdef INET6
		int isipv6;
#endif

		/*
		 * The idea here is that each stage of the MTU probe
		 * (usually 1448 -> 1188 -> 524) should be given 2 chances
		 * to recover before further clamping down.
		 * 'tp->t_rxtshift % 2 == 0' should take care of that.
		 */
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
		    tp->t_rxtshift % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to lower value than what we
			 *   negotiated with peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to the blackhole value or to the
			 * default in an attempt to retransmit.
			 */
#ifdef INET6
			isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
				/* Use the sysctl tunable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
				/* Use the sysctl tunable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(tp->ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, maybe this isn't a blackhole,
			 * so we restore the previous MSS and blackhole
			 * detection flags.  The limit '6' is determined by
			 * giving each probe stage (1448, 1188, 524) 2
			 * chances to recover.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= 6)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(tp->ccv);
			}
		}
	}
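	/*
	 * Editorial example (not in the original) of the schedule the
	 * conditions above produce for an IPv4 connection that negotiated
	 * a 1448-byte MSS: retransmit #2 clamps t_maxseg to
	 * V_tcp_pmtud_blackhole_mss (1200 by default), retransmit #4 falls
	 * through to V_tcp_mssdflt with PMTUD disabled, and if retransmits
	 * keep failing through #6, the saved MSS is restored and blackhole
	 * detection gives up.
	 */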
	/*
	 * Disable RFC 1323 options and SACK if we haven't got any response
	 * to our third SYN to work around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're having
	 * connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
			in6_losing(tp->t_inpcb);
		else
#endif
			in_losing(tp->t_inpcb);
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	cc_cong_signal(tp, NULL, CC_RTO);

	(void) tp->t_fb->tfb_tcp_output(tp);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}
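/*
 * Editorial note (not in the original): rewinding snd_nxt to snd_una in
 * tcp_timer_rexmt() above restarts transmission go-back-N style from the
 * oldest unacknowledged byte, and cc_cong_signal(tp, NULL, CC_RTO) lets
 * the congestion-control module react to the timeout; under the standard
 * RFC 5681 response that means collapsing the congestion window to one
 * segment and re-entering slow start.
 */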
void
tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta)
{
	struct callout *t_callout;
	timeout_t *f_callout;
	struct inpcb *inp = tp->t_inpcb;
	int cpu = inp_to_cpuid(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	if (tp->t_timers->tt_flags & TT_STOPPED)
		return;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		f_callout = tcp_timer_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		f_callout = tcp_timer_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		f_callout = tcp_timer_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		f_callout = tcp_timer_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		f_callout = tcp_timer_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_activate) {
			tp->t_fb->tfb_tcp_timer_activate(tp, timer_type, delta);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	if (delta == 0) {
		callout_stop(t_callout);
	} else {
		callout_reset_on(t_callout, delta, f_callout, tp, cpu);
	}
}

int
tcp_timer_active(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_active) {
			return (tp->t_fb->tfb_tcp_timer_active(tp, timer_type));
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	return (callout_active(t_callout));
}

void
tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	tp->t_timers->tt_flags |= TT_STOPPED;
	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_stop) {
			/*
			 * XXXrrs we need to look at this with the
			 * stop case below (flags).
			 */
			tp->t_fb->tfb_tcp_timer_stop(tp, timer_type);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}

	if (callout_async_drain(t_callout, tcp_timer_discard) == 0) {
		/*
		 * Can't stop the callout; defer the actual tcpcb deletion
		 * to the last timer to drain.  We do this using the async
		 * drain function and incrementing the count in
		 * tt_draincnt, which tcp_timer_discard() decrements.
		 */
		tp->t_timers->tt_draincnt++;
	}
}
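#if 0
/*
 * Illustrative sketch (editorial, not part of this file) of the timer
 * API above, modeled on how callers such as tcp_output() arm and test
 * timers; not verbatim kernel code.
 */
	/* Arm the retransmit timer if data is outstanding and it is idle. */
	if (!tcp_timer_active(tp, TT_REXMT) && tp->snd_una != tp->snd_max)
		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);

	/* A delta of 0 stops a timer rather than (re)arming it. */
	tcp_timer_activate(tp, TT_REXMT, 0);
#endif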