/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

int	tcp_persmin;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_persmin, 0, sysctl_msec_to_ticks, "I",
    "minimum persistence interval");

int	tcp_persmax;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_persmax, 0, sysctl_msec_to_ticks, "I",
    "maximum persistence interval");

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I",
    "time to establish connection");
int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I",
    "time before keepalive probes begin");

int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I",
    "time between keepalive probes");

int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT|CTLFLAG_RW,
    &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

int	tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");

int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

static int	always_keepalive = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
    &always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");

int	tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

int	tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT|CTLFLAG_RW,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I",
    "FIN-WAIT2 timeout");

int	tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

/* max idle probes */
int	tcp_maxpersistidle;

static int	tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
    &tcp_rexmit_drop_options, 0,
    "Drop TCP options from 3rd and later retransmitted SYN");

VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
    "Path MTU Discovery Black Hole Detection Enabled");

#ifdef INET
VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#endif

#ifdef INET6
VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
    "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif

#ifdef RSS
static int	per_cpu_timers = 1;
#else
static int	per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

#if 0
#define	INP_CPU(inp)	(per_cpu_timers ? \
    (!CPU_ABSENT(((inp)->inp_flowid % (mp_maxid+1))) ? \
    ((inp)->inp_flowid % (mp_maxid+1)) : curcpu) : 0)
#endif

/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
static inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

#ifdef RSS
	if (per_cpu_timers) {
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
	}
#else
	/* Legacy, pre-RSS behaviour */
	if (per_cpu_timers) {
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	}
#endif
	/* Default for RSS and non-RSS - cpuid 0 */
	return (0);
}

/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates timestamps used for TCP and causes finite state
 * machine actions if timers expire.
 */
void
tcp_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		(void) tcp_tw_2msl_scan(0);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/* RTO backoff multipliers, indexed by t_rxtshift. */
int	tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

/* Sum of tcp_backoff[]: 1+2+4+8+16+32+64+128+256 + 4*512 = 2559. */
static int tcp_totbackoff = 2559;

/*
 * TCP timer processing.
 */

void
tcp_timer_delack(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);

	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_delack) ||
	    !callout_active(&tp->t_timers->tt_delack)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_delack);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	(void) tp->t_fb->tfb_tcp_output(tp);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
}

/*
 * When a timer wants to remove a TCB it must hold the INP_INFO_RLOCK().
 * The timer function should only have grabbed the INP_WLOCK() when it
 * entered.  To safely switch to holding both the INP_INFO_RLOCK() and
 * the INP_WLOCK() we must first grab a reference on the inp, which
 * holds the inp so that it can't be removed.  We then unlock the
 * INP_WLOCK() and grab the INP_INFO_RLOCK().  Once we have the
 * INP_INFO_RLOCK() we proceed again to get the INP_WLOCK() (this
 * preserves proper lock order).  After acquiring the INP_WLOCK() we
 * must check whether someone else deleted the pcb, i.e. the inp_flags
 * check.  If so we return 1, otherwise we return 0.
 *
 * No matter what tcp_inpinfo_lock_add() returns, the caller must
 * afterwards call tcp_inpinfo_lock_del() to drop the locks and the
 * reference properly.
 */
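/*
 * A sketch of the pattern the timer functions below follow when they
 * need to call tcp_close()/tcp_drop() (which require the info lock):
 *
 *	if (tcp_inpinfo_lock_add(inp)) {
 *		tcp_inpinfo_lock_del(inp, tp);
 *		goto out;
 *	}
 *	tp = tcp_drop(tp, ETIMEDOUT);
 *	tcp_inpinfo_lock_del(inp, tp);
 */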
int
tcp_inpinfo_lock_add(struct inpcb *inp)
{

	in_pcbref(inp);
	INP_WUNLOCK(inp);
	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED))
		return (1);
	return (0);
}

void
tcp_inpinfo_lock_del(struct inpcb *inp, struct tcpcb *tp)
{

	INP_INFO_RUNLOCK(&V_tcbinfo);
	if (inp && (tp == NULL)) {
		/*
		 * If tcp_close/drop() gets called and tp
		 * returns NULL, then the function dropped
		 * the inp lock, but we hold a reference keeping
		 * the inp around, so we must re-acquire the
		 * INP_WLOCK() in order to proceed with
		 * dropping the inp reference.
		 */
		INP_WLOCK(inp);
	}
	if (inp && in_pcbrele_wlocked(inp) == 0)
		INP_WUNLOCK(inp);
}

void
tcp_timer_2msl(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	tcp_free_sackholes(tp);
	if (callout_pending(&tp->t_timers->tt_2msl) ||
	    !callout_active(&tp->t_timers->tt_2msl)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_2msl);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * The 2 MSL shutdown timeout went off.  If we're closed but still
	 * waiting for the peer to close and the connection has been idle
	 * too long, delete the connection control block.  Otherwise, check
	 * again in a bit.
	 *
	 * If in TIME_WAIT state, just ignore this timeout; it is handled
	 * in tcp_tw_2msl_scan().
	 *
	 * If fast recycling of FIN_WAIT_2 sockets is enabled, we are in
	 * FIN_WAIT_2, and the receiver has closed, there's no point in
	 * hanging onto the socket; just close it, ignoring the fact that
	 * there were recent incoming segments.
	 */
	if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
	    tp->t_inpcb && tp->t_inpcb->inp_socket &&
	    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_close(tp);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_2msl,
			    TP_KEEPINTVL(tp), tcp_timer_2msl, tp);
		} else {
			if (tcp_inpinfo_lock_add(inp)) {
				tcp_inpinfo_lock_del(inp, tp);
				goto out;
			}
			tp = tcp_close(tp);
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
	}

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);

	if (tp != NULL)
		INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_keep(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_keep) ||
	    !callout_active(&tp->t_timers->tt_keep)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_keep);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));

	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet.  If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_keep,
			    TP_KEEPIDLE(tp) - idletime, tcp_timer_keep, tp);
			INP_WUNLOCK(inp);
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			free(t_template, M_TEMP);
		}
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
		    tcp_timer_keep, tp);
	} else
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
		    tcp_timer_keep, tp);

#ifdef TCPDEBUG
	if (inp->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return;

dropit:
	TCPSTAT_INC(tcps_keepdrops);

	if (tcp_inpinfo_lock_add(inp)) {
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	tcp_inpinfo_lock_del(inp, tp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_persist(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_persist) ||
	    !callout_active(&tp->t_timers->tt_persist)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_persist);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * The persistence timer has gone off while probing a zero
	 * window.  Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
		TCPSTAT_INC(tcps_persistdrop);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_drop(tp, ETIMEDOUT);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_drop(tp, ETIMEDOUT);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->t_flags &= ~TF_FORCEDATA;

#ifdef TCPDEBUG
	if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_rexmt(void *xtp)
{
	struct tcpcb *tp = xtp;
	CURVNET_SET(tp->t_vnet);
	int rexmt;
	struct inpcb *inp;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_rexmt) ||
	    !callout_active(&tp->t_timers->tt_rexmt)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_rexmt);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	tcp_free_sackholes(tp);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		TCPSTAT_INC(tcps_timeoutdrop);
		if (tcp_inpinfo_lock_add(inp)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		tp = tcp_drop(tp, tp->t_softerror ?
		    tp->t_softerror : ETIMEDOUT);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * First retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		/*
		 * t_srtt is kept with TCP_RTT_SHIFT bits of fraction, so
		 * shifting right by TCP_RTT_SHIFT + 1 yields srtt/2 in
		 * ticks.
		 */
		tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = TCPTV_RTOBASE * tcp_syn_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    tp->t_rttmin, TCPTV_REXMTMAX);

	/*
	 * Enter the PLMTUD path if the connection is in the ESTABLISHED
	 * or FIN_WAIT_1 state.  FIN_WAIT_1 is included because, if the
	 * amount of data we send is very small, we could send it in a
	 * couple of packets and proceed straight to FIN; in that case we
	 * would never see the ESTABLISHED state.
	 */
	if (V_tcp_pmtud_blackhole_detect && ((tp->t_state == TCPS_ESTABLISHED)
	    || (tp->t_state == TCPS_FIN_WAIT_1))) {
#ifdef INET6
		int isipv6;
#endif

		/*
		 * The idea here is that each stage of the MTU probe
		 * (usually 1448 -> 1188 -> 524) should be given two
		 * chances to recover before clamping down further;
		 * 'tp->t_rxtshift % 2 == 0' takes care of that.
		 */
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 &&
		    tp->t_rxtshift % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to a lower value than what we
			 *   negotiated with the peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to the blackhole value or to the
			 * default in an attempt to retransmit.
			 */
#ifdef INET6
			isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) {
				/* Use the sysctl tunable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) {
				/* Use the sysctl tunable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch
				 * to minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(tp->ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, maybe this isn't a blackhole,
			 * so we restore the previous MSS and blackhole
			 * detection flags.  The limit '6' is determined by
			 * giving each probe stage (1448, 1188, 524) two
			 * chances to recover.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= 6)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(tp->ccv);
			}
		}
	}

	/*
	 * Disable RFC 1323 and SACK if we haven't got any response to
	 * our third SYN, to work around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're
	 * having connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
			in6_losing(tp->t_inpcb);
		else
#endif
			in_losing(tp->t_inpcb);
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	cc_cong_signal(tp, NULL, CC_RTO);

	(void) tp->t_fb->tfb_tcp_output(tp);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

/*
 * Arm the given timer to fire after 'delta' ticks, or stop it if
 * 'delta' is 0.
 */
void
tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta)
{
	struct callout *t_callout;
	timeout_t *f_callout;
	struct inpcb *inp = tp->t_inpcb;
	int cpu = inp_to_cpuid(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	if (tp->t_timers->tt_flags & TT_STOPPED)
		return;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		f_callout = tcp_timer_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		f_callout = tcp_timer_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		f_callout = tcp_timer_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		f_callout = tcp_timer_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		f_callout = tcp_timer_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_activate) {
			tp->t_fb->tfb_tcp_timer_activate(tp, timer_type, delta);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	if (delta == 0)
		callout_stop(t_callout);
	else
		callout_reset_on(t_callout, delta, f_callout, tp, cpu);
}

/*
 * Return whether the given timer is currently active.
 */
int
tcp_timer_active(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_active)
			return (tp->t_fb->tfb_tcp_timer_active(tp, timer_type));
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	return (callout_active(t_callout));
}

/*
 * Permanently stop the given timer; used while the tcpcb is being
 * torn down.
 */
void
tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	tp->t_timers->tt_flags |= TT_STOPPED;
	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_stop) {
			/*
			 * XXXrrs we need to look at this with the
			 * stop case below (flags).
			 */
			tp->t_fb->tfb_tcp_timer_stop(tp, timer_type);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}

	if (callout_async_drain(t_callout, tcp_timer_discard) == 0) {
		/*
		 * Can't stop the callout; defer the tcpcb's actual
		 * deletion to the last timer to drain.  We do this
		 * using the async drain function and incrementing
		 * the count in tt_draincnt.
		 */
		tp->t_timers->tt_draincnt++;
	}
}
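/*
 * Usage sketch (illustrative, not part of this file): callers such as
 * tcp_output() typically (re)arm the retransmit timer after sending,
 * along the lines of:
 *
 *	if (!tcp_timer_active(tp, TT_REXMT) &&
 *	    !tcp_timer_active(tp, TT_PERSIST))
 *		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
 */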