1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include "opt_inet.h" 33 #include "opt_inet6.h" 34 #include "opt_rss.h" 35 36 #include <sys/param.h> 37 #include <sys/kernel.h> 38 #include <sys/lock.h> 39 #include <sys/mbuf.h> 40 #include <sys/mutex.h> 41 #include <sys/protosw.h> 42 #include <sys/smp.h> 43 #include <sys/socket.h> 44 #include <sys/socketvar.h> 45 #include <sys/sysctl.h> 46 #include <sys/systm.h> 47 48 #include <net/if.h> 49 #include <net/route.h> 50 #include <net/rss_config.h> 51 #include <net/vnet.h> 52 #include <net/netisr.h> 53 54 #include <netinet/in.h> 55 #include <netinet/in_kdtrace.h> 56 #include <netinet/in_pcb.h> 57 #include <netinet/in_rss.h> 58 #include <netinet/in_systm.h> 59 #ifdef INET6 60 #include <netinet6/in6_pcb.h> 61 #endif 62 #include <netinet/ip_var.h> 63 #include <netinet/tcp.h> 64 #include <netinet/tcp_fsm.h> 65 #include <netinet/tcp_timer.h> 66 #include <netinet/tcp_var.h> 67 #include <netinet/tcp_log_buf.h> 68 #include <netinet/tcp_seq.h> 69 #include <netinet/cc/cc.h> 70 #ifdef INET6 71 #include <netinet6/tcp6_var.h> 72 #endif 73 #include <netinet/tcpip.h> 74 75 int tcp_persmin; 76 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin, CTLTYPE_INT | CTLFLAG_RW, 77 &tcp_persmin, 0, sysctl_msec_to_ticks, "I", 78 "minimum persistence interval"); 79 80 int tcp_persmax; 81 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax, CTLTYPE_INT | CTLFLAG_RW, 82 &tcp_persmax, 0, sysctl_msec_to_ticks, "I", 83 "maximum persistence interval"); 84 85 int tcp_keepinit; 86 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT | CTLFLAG_RW, 87 &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", 88 "time to establish connection"); 89 90 int tcp_keepidle; 91 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT | CTLFLAG_RW, 92 &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", 93 "time before keepalive probes begin"); 94 95 int tcp_keepintvl; 96 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, 97 CTLTYPE_INT | CTLFLAG_RW, &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", 98 "time between keepalive probes"); 99 100 int tcp_delacktime; 101 SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, 102 CTLTYPE_INT | CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I", 103 "Time before a delayed ACK is sent"); 104 105 VNET_DEFINE(int, tcp_msl); 106 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, 107 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET, 108 &VNET_NAME(tcp_msl), 0, sysctl_msec_to_ticks, "I", 109 "Maximum segment lifetime"); 110 111 VNET_DEFINE(int, tcp_msl_local); 112 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl_local, 113 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET, 114 &VNET_NAME(tcp_msl_local), 0, sysctl_msec_to_ticks, "I", 115 "Maximum segment lifetime for local communication"); 116 117 int tcp_rexmit_initial; 118 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_initial, CTLTYPE_INT | CTLFLAG_RW, 119 &tcp_rexmit_initial, 0, sysctl_msec_to_ticks, "I", 120 "Initial Retransmission Timeout"); 121 122 int tcp_rexmit_min; 123 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT | CTLFLAG_RW, 124 &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I", 125 "Minimum Retransmission Timeout"); 126 127 int tcp_rexmit_max; 128 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_max, CTLTYPE_INT | CTLFLAG_RW, 129 &tcp_rexmit_max, 0, sysctl_msec_to_ticks, "I", 130 "Maximum Retransmission Timeout"); 131 132 int tcp_rexmit_slop; 133 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT | CTLFLAG_RW, 134 &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I", 135 "Retransmission Timer Slop"); 136 137 VNET_DEFINE(int, tcp_always_keepalive) = 1; 138 SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_VNET|CTLFLAG_RW, 139 &VNET_NAME(tcp_always_keepalive) , 0, 140 "Assume SO_KEEPALIVE on all TCP connections"); 141 142 int tcp_fast_finwait2_recycle = 0; 143 SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW, 144 &tcp_fast_finwait2_recycle, 0, 145 "Recycle closed FIN_WAIT_2 connections faster"); 146 147 int tcp_finwait2_timeout; 148 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT | CTLFLAG_RW, 149 &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", 150 "FIN-WAIT2 timeout"); 151 152 int tcp_keepcnt = TCPTV_KEEPCNT; 153 SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0, 154 "Number of keepalive probes to send"); 155 156 /* max idle probes */ 157 int tcp_maxpersistidle; 158 159 int tcp_rexmit_drop_options = 0; 160 SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW, 161 &tcp_rexmit_drop_options, 0, 162 "Drop TCP options from 3rd and later retransmitted SYN"); 163 164 int tcp_maxunacktime = TCPTV_MAXUNACKTIME; 165 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxunacktime, CTLTYPE_INT | CTLFLAG_RW, 166 &tcp_maxunacktime, 0, sysctl_msec_to_ticks, "I", 167 "Maximum time (in ms) that a session can linger without making progress"); 168 169 VNET_DEFINE(int, tcp_pmtud_blackhole_detect); 170 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection, 171 CTLFLAG_RW|CTLFLAG_VNET, 172 &VNET_NAME(tcp_pmtud_blackhole_detect), 0, 173 "Path MTU Discovery Black Hole Detection Enabled"); 174 175 #ifdef INET 176 VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200; 177 SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss, 178 CTLFLAG_RW|CTLFLAG_VNET, 179 &VNET_NAME(tcp_pmtud_blackhole_mss), 0, 180 "Path MTU Discovery Black Hole Detection lowered MSS"); 181 #endif 182 183 #ifdef INET6 184 VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220; 185 SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss, 186 CTLFLAG_RW|CTLFLAG_VNET, 187 &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0, 188 "Path MTU Discovery IPv6 Black Hole Detection lowered MSS"); 189 #endif 190 191 #ifdef RSS 192 static int per_cpu_timers = 1; 193 #else 194 static int per_cpu_timers = 0; 195 #endif 196 SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW, 197 &per_cpu_timers , 0, "run tcp timers on all cpus"); 198 199 static int 200 sysctl_net_inet_tcp_retries(SYSCTL_HANDLER_ARGS) 201 { 202 int error, new; 203 204 new = V_tcp_retries; 205 error = sysctl_handle_int(oidp, &new, 0, req); 206 if (error == 0 && req->newptr) { 207 if ((new < 1) || (new > TCP_MAXRXTSHIFT)) 208 error = EINVAL; 209 else 210 V_tcp_retries = new; 211 } 212 return (error); 213 } 214 215 VNET_DEFINE(int, tcp_retries) = TCP_MAXRXTSHIFT; 216 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, retries, 217 CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW, 218 &VNET_NAME(tcp_retries), 0, sysctl_net_inet_tcp_retries, "I", 219 "maximum number of consecutive timer based retransmissions"); 220 221 /* 222 * Map the given inp to a CPU id. 223 * 224 * This queries RSS if it's compiled in, else it defaults to the current 225 * CPU ID. 226 */ 227 inline int 228 inp_to_cpuid(struct inpcb *inp) 229 { 230 u_int cpuid; 231 232 if (per_cpu_timers) { 233 #ifdef RSS 234 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype); 235 if (cpuid == NETISR_CPUID_NONE) 236 return (curcpu); /* XXX */ 237 else 238 return (cpuid); 239 #endif 240 /* 241 * We don't have a flowid -> cpuid mapping, so cheat and 242 * just map unknown cpuids to curcpu. Not the best, but 243 * apparently better than defaulting to swi 0. 244 */ 245 cpuid = inp->inp_flowid % (mp_maxid + 1); 246 if (! CPU_ABSENT(cpuid)) 247 return (cpuid); 248 return (curcpu); 249 } else { 250 return (0); 251 } 252 } 253 254 int tcp_backoff[TCP_MAXRXTSHIFT + 1] = 255 { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 }; 256 257 int tcp_totbackoff = 2559; /* sum of tcp_backoff[] */ 258 259 /* 260 * TCP timer processing. 261 * 262 * Each connection has 5 timers associated with it, which can be scheduled 263 * simultaneously. They all are serviced by one callout tcp_timer_enter(). 264 * This function executes the next timer via tcp_timersw[] vector. Each 265 * timer is supposed to return 'true' unless the connection was destroyed. 266 * In the former case tcp_timer_enter() will schedule callout for next timer. 267 */ 268 269 typedef bool tcp_timer_t(struct tcpcb *); 270 static tcp_timer_t tcp_timer_delack; 271 static tcp_timer_t tcp_timer_2msl; 272 static tcp_timer_t tcp_timer_keep; 273 static tcp_timer_t tcp_timer_persist; 274 static tcp_timer_t tcp_timer_rexmt; 275 276 static tcp_timer_t * const tcp_timersw[TT_N] = { 277 [TT_DELACK] = tcp_timer_delack, 278 [TT_REXMT] = tcp_timer_rexmt, 279 [TT_PERSIST] = tcp_timer_persist, 280 [TT_KEEP] = tcp_timer_keep, 281 [TT_2MSL] = tcp_timer_2msl, 282 }; 283 284 /* 285 * tcp_output_locked() s a timer specific variation of call to tcp_output(), 286 * see tcp_var.h for the rest. It handles drop request from advanced stacks, 287 * but keeps tcpcb locked unless tcp_drop() destroyed it. 288 * Returns true if tcpcb is valid and locked. 289 */ 290 static inline bool 291 tcp_output_locked(struct tcpcb *tp) 292 { 293 int rv; 294 295 INP_WLOCK_ASSERT(tptoinpcb(tp)); 296 297 if ((rv = tp->t_fb->tfb_tcp_output(tp)) < 0) { 298 KASSERT(tp->t_fb->tfb_flags & TCP_FUNC_OUTPUT_CANDROP, 299 ("TCP stack %s requested tcp_drop(%p)", 300 tp->t_fb->tfb_tcp_block_name, tp)); 301 tp = tcp_drop(tp, -rv); 302 } 303 304 return (tp != NULL); 305 } 306 307 static bool 308 tcp_timer_delack(struct tcpcb *tp) 309 { 310 struct epoch_tracker et; 311 #if defined(INVARIANTS) || defined(VIMAGE) 312 struct inpcb *inp = tptoinpcb(tp); 313 #endif 314 bool rv; 315 316 INP_WLOCK_ASSERT(inp); 317 318 CURVNET_SET(inp->inp_vnet); 319 tp->t_flags |= TF_ACKNOW; 320 TCPSTAT_INC(tcps_delack); 321 NET_EPOCH_ENTER(et); 322 rv = tcp_output_locked(tp); 323 NET_EPOCH_EXIT(et); 324 CURVNET_RESTORE(); 325 326 return (rv); 327 } 328 329 static bool 330 tcp_timer_2msl(struct tcpcb *tp) 331 { 332 struct inpcb *inp = tptoinpcb(tp); 333 bool close = false; 334 335 INP_WLOCK_ASSERT(inp); 336 337 TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); 338 CURVNET_SET(inp->inp_vnet); 339 tcp_log_end_status(tp, TCP_EI_STATUS_2MSL); 340 tcp_free_sackholes(tp); 341 /* 342 * 2 MSL timeout in shutdown went off. If we're closed but 343 * still waiting for peer to close and connection has been idle 344 * too long delete connection control block. Otherwise, check 345 * again in a bit. 346 * 347 * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed, 348 * there's no point in hanging onto FIN_WAIT_2 socket. Just close it. 349 * Ignore fact that there were recent incoming segments. 350 * 351 * XXXGL: check if inp_socket shall always be !NULL here? 352 */ 353 if (tp->t_state == TCPS_TIME_WAIT) { 354 close = true; 355 } else if (tp->t_state == TCPS_FIN_WAIT_2 && 356 tcp_fast_finwait2_recycle && inp->inp_socket && 357 (inp->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) { 358 TCPSTAT_INC(tcps_finwait2_drops); 359 close = true; 360 } else { 361 if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) 362 tcp_timer_activate(tp, TT_2MSL, TP_KEEPINTVL(tp)); 363 else 364 close = true; 365 } 366 if (close) { 367 struct epoch_tracker et; 368 369 NET_EPOCH_ENTER(et); 370 tp = tcp_close(tp); 371 NET_EPOCH_EXIT(et); 372 } 373 CURVNET_RESTORE(); 374 375 return (tp != NULL); 376 } 377 378 static bool 379 tcp_timer_keep(struct tcpcb *tp) 380 { 381 struct epoch_tracker et; 382 struct inpcb *inp = tptoinpcb(tp); 383 struct tcptemp *t_template; 384 385 INP_WLOCK_ASSERT(inp); 386 387 TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); 388 CURVNET_SET(inp->inp_vnet); 389 /* 390 * Because we don't regularly reset the keepalive callout in 391 * the ESTABLISHED state, it may be that we don't actually need 392 * to send a keepalive yet. If that occurs, schedule another 393 * call for the next time the keepalive timer might expire. 394 */ 395 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 396 u_int idletime; 397 398 idletime = ticks - tp->t_rcvtime; 399 if (idletime < TP_KEEPIDLE(tp)) { 400 tcp_timer_activate(tp, TT_KEEP, 401 TP_KEEPIDLE(tp) - idletime); 402 CURVNET_RESTORE(); 403 return (true); 404 } 405 } 406 407 /* 408 * Keep-alive timer went off; send something 409 * or drop connection if idle for too long. 410 */ 411 TCPSTAT_INC(tcps_keeptimeo); 412 if (tp->t_state < TCPS_ESTABLISHED) 413 goto dropit; 414 if ((V_tcp_always_keepalive || 415 inp->inp_socket->so_options & SO_KEEPALIVE) && 416 tp->t_state <= TCPS_CLOSING) { 417 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 418 goto dropit; 419 /* 420 * Send a packet designed to force a response 421 * if the peer is up and reachable: 422 * either an ACK if the connection is still alive, 423 * or an RST if the peer has closed the connection 424 * due to timeout or reboot. 425 * Using sequence number tp->snd_una-1 426 * causes the transmitted zero-length segment 427 * to lie outside the receive window; 428 * by the protocol spec, this requires the 429 * correspondent TCP to respond. 430 */ 431 TCPSTAT_INC(tcps_keepprobe); 432 t_template = tcpip_maketemplate(inp); 433 if (t_template) { 434 NET_EPOCH_ENTER(et); 435 tcp_respond(tp, t_template->tt_ipgen, 436 &t_template->tt_t, (struct mbuf *)NULL, 437 tp->rcv_nxt, tp->snd_una - 1, 0); 438 NET_EPOCH_EXIT(et); 439 free(t_template, M_TEMP); 440 } 441 tcp_timer_activate(tp, TT_KEEP, TP_KEEPINTVL(tp)); 442 } else 443 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 444 445 CURVNET_RESTORE(); 446 return (true); 447 448 dropit: 449 TCPSTAT_INC(tcps_keepdrops); 450 NET_EPOCH_ENTER(et); 451 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 452 tp = tcp_drop(tp, ETIMEDOUT); 453 NET_EPOCH_EXIT(et); 454 CURVNET_RESTORE(); 455 456 return (tp != NULL); 457 } 458 459 /* 460 * Has this session exceeded the maximum time without seeing a substantive 461 * acknowledgement? If so, return true; otherwise false. 462 */ 463 static bool 464 tcp_maxunacktime_check(struct tcpcb *tp) 465 { 466 467 /* Are we tracking this timer for this session? */ 468 if (TP_MAXUNACKTIME(tp) == 0) 469 return false; 470 471 /* Do we have a current measurement. */ 472 if (tp->t_acktime == 0) 473 return false; 474 475 /* Are we within the acceptable range? */ 476 if (TSTMP_GT(TP_MAXUNACKTIME(tp) + tp->t_acktime, (u_int)ticks)) 477 return false; 478 479 /* We exceeded the timer. */ 480 TCPSTAT_INC(tcps_progdrops); 481 return true; 482 } 483 484 static bool 485 tcp_timer_persist(struct tcpcb *tp) 486 { 487 struct epoch_tracker et; 488 #if defined(INVARIANTS) || defined(VIMAGE) 489 struct inpcb *inp = tptoinpcb(tp); 490 #endif 491 bool progdrop, rv; 492 493 INP_WLOCK_ASSERT(inp); 494 495 TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); 496 CURVNET_SET(inp->inp_vnet); 497 /* 498 * Persistence timer into zero window. 499 * Force a byte to be output, if possible. 500 */ 501 TCPSTAT_INC(tcps_persisttimeo); 502 /* 503 * Hack: if the peer is dead/unreachable, we do not 504 * time out if the window is closed. After a full 505 * backoff, drop the connection if the idle time 506 * (no responses to probes) reaches the maximum 507 * backoff that we would use if retransmitting. 508 * Also, drop the connection if we haven't been making 509 * progress. 510 */ 511 progdrop = tcp_maxunacktime_check(tp); 512 if (progdrop || (tp->t_rxtshift >= V_tcp_retries && 513 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 514 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff))) { 515 if (progdrop) { 516 tcp_log_end_status(tp, TCP_EI_STATUS_PROGRESS); 517 } else { 518 TCPSTAT_INC(tcps_persistdrop); 519 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 520 } 521 goto dropit; 522 } 523 /* 524 * If the user has closed the socket then drop a persisting 525 * connection after a much reduced timeout. 526 */ 527 if (tp->t_state > TCPS_CLOSE_WAIT && 528 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 529 TCPSTAT_INC(tcps_persistdrop); 530 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 531 goto dropit; 532 } 533 tcp_setpersist(tp); 534 tp->t_flags |= TF_FORCEDATA; 535 NET_EPOCH_ENTER(et); 536 if ((rv = tcp_output_locked(tp))) 537 tp->t_flags &= ~TF_FORCEDATA; 538 NET_EPOCH_EXIT(et); 539 CURVNET_RESTORE(); 540 541 return (rv); 542 543 dropit: 544 NET_EPOCH_ENTER(et); 545 tp = tcp_drop(tp, ETIMEDOUT); 546 NET_EPOCH_EXIT(et); 547 CURVNET_RESTORE(); 548 549 return (tp != NULL); 550 } 551 552 static bool 553 tcp_timer_rexmt(struct tcpcb *tp) 554 { 555 struct epoch_tracker et; 556 struct inpcb *inp = tptoinpcb(tp); 557 int rexmt; 558 bool isipv6, rv; 559 560 INP_WLOCK_ASSERT(inp); 561 562 TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO); 563 CURVNET_SET(inp->inp_vnet); 564 if (tp->t_fb->tfb_tcp_rexmit_tmr) { 565 /* The stack has a timer action too. */ 566 (*tp->t_fb->tfb_tcp_rexmit_tmr)(tp); 567 } 568 /* 569 * Retransmission timer went off. Message has not 570 * been acked within retransmit interval. Back off 571 * to a longer retransmit interval and retransmit one segment. 572 * 573 * If we've either exceeded the maximum number of retransmissions, 574 * or we've gone long enough without making progress, then drop 575 * the session. 576 */ 577 if (++tp->t_rxtshift > V_tcp_retries || tcp_maxunacktime_check(tp)) { 578 if (tp->t_rxtshift > V_tcp_retries) 579 TCPSTAT_INC(tcps_timeoutdrop); 580 tp->t_rxtshift = V_tcp_retries; 581 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 582 NET_EPOCH_ENTER(et); 583 tp = tcp_drop(tp, ETIMEDOUT); 584 NET_EPOCH_EXIT(et); 585 CURVNET_RESTORE(); 586 587 return (tp != NULL); 588 } 589 if (tp->t_state == TCPS_SYN_SENT) { 590 /* 591 * If the SYN was retransmitted, indicate CWND to be 592 * limited to 1 segment in cc_conn_init(). 593 */ 594 tp->snd_cwnd = 1; 595 } else if (tp->t_rxtshift == 1) { 596 /* 597 * first retransmit; record ssthresh and cwnd so they can 598 * be recovered if this turns out to be a "bad" retransmit. 599 * A retransmit is considered "bad" if an ACK for this 600 * segment is received within RTT/2 interval; the assumption 601 * here is that the ACK was already in flight. See 602 * "On Estimating End-to-End Network Path Properties" by 603 * Allman and Paxson for more details. 604 */ 605 tp->snd_cwnd_prev = tp->snd_cwnd; 606 tp->snd_ssthresh_prev = tp->snd_ssthresh; 607 tp->snd_recover_prev = tp->snd_recover; 608 if (IN_FASTRECOVERY(tp->t_flags)) 609 tp->t_flags |= TF_WASFRECOVERY; 610 else 611 tp->t_flags &= ~TF_WASFRECOVERY; 612 if (IN_CONGRECOVERY(tp->t_flags)) 613 tp->t_flags |= TF_WASCRECOVERY; 614 else 615 tp->t_flags &= ~TF_WASCRECOVERY; 616 if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 617 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); 618 /* In the event that we've negotiated timestamps 619 * badrxtwin will be set to the value that we set 620 * the retransmitted packet's to_tsval to by tcp_output 621 */ 622 tp->t_flags |= TF_PREVVALID; 623 tcp_resend_sackholes(tp); 624 } else { 625 tp->t_flags &= ~TF_PREVVALID; 626 tcp_free_sackholes(tp); 627 } 628 TCPSTAT_INC(tcps_rexmttimeo); 629 if ((tp->t_state == TCPS_SYN_SENT) || 630 (tp->t_state == TCPS_SYN_RECEIVED)) 631 rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift]; 632 else 633 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; 634 TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, tcp_rexmit_max); 635 636 /* 637 * We enter the path for PLMTUD if connection is established or, if 638 * connection is FIN_WAIT_1 status, reason for the last is that if 639 * amount of data we send is very small, we could send it in couple of 640 * packets and process straight to FIN. In that case we won't catch 641 * ESTABLISHED state. 642 */ 643 #ifdef INET6 644 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; 645 #else 646 isipv6 = false; 647 #endif 648 if (((V_tcp_pmtud_blackhole_detect == 1) || 649 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 650 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 651 ((tp->t_state == TCPS_ESTABLISHED) || 652 (tp->t_state == TCPS_FIN_WAIT_1))) { 653 if (tp->t_rxtshift == 1) { 654 /* 655 * We enter blackhole detection after the first 656 * unsuccessful timer based retransmission. 657 * Then we reduce up to two times the MSS, each 658 * candidate giving two tries of retransmissions. 659 * But we give a candidate only two tries, if it 660 * actually reduces the MSS. 661 */ 662 tp->t_blackhole_enter = 2; 663 tp->t_blackhole_exit = tp->t_blackhole_enter; 664 if (isipv6) { 665 #ifdef INET6 666 if (tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) 667 tp->t_blackhole_exit += 2; 668 if (tp->t_maxseg > V_tcp_v6mssdflt && 669 V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt) 670 tp->t_blackhole_exit += 2; 671 #endif 672 } else { 673 #ifdef INET 674 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) 675 tp->t_blackhole_exit += 2; 676 if (tp->t_maxseg > V_tcp_mssdflt && 677 V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt) 678 tp->t_blackhole_exit += 2; 679 #endif 680 } 681 } 682 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) == 683 (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) && 684 (tp->t_rxtshift >= tp->t_blackhole_enter && 685 tp->t_rxtshift < tp->t_blackhole_exit && 686 (tp->t_rxtshift - tp->t_blackhole_enter) % 2 == 0)) { 687 /* 688 * Enter Path MTU Black-hole Detection mechanism: 689 * - Disable Path MTU Discovery (IP "DF" bit). 690 * - Reduce MTU to lower value than what we 691 * negotiated with peer. 692 */ 693 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 694 /* Record that we may have found a black hole. */ 695 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 696 /* Keep track of previous MSS. */ 697 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 698 } 699 700 /* 701 * Reduce the MSS to blackhole value or to the default 702 * in an attempt to retransmit. 703 */ 704 #ifdef INET6 705 if (isipv6 && 706 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss && 707 V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt) { 708 /* Use the sysctl tuneable blackhole MSS. */ 709 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 710 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 711 } else if (isipv6) { 712 /* Use the default MSS. */ 713 tp->t_maxseg = V_tcp_v6mssdflt; 714 /* 715 * Disable Path MTU Discovery when we switch to 716 * minmss. 717 */ 718 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 719 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 720 } 721 #endif 722 #if defined(INET6) && defined(INET) 723 else 724 #endif 725 #ifdef INET 726 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss && 727 V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt) { 728 /* Use the sysctl tuneable blackhole MSS. */ 729 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 730 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 731 } else { 732 /* Use the default MSS. */ 733 tp->t_maxseg = V_tcp_mssdflt; 734 /* 735 * Disable Path MTU Discovery when we switch to 736 * minmss. 737 */ 738 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 739 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 740 } 741 #endif 742 /* 743 * Reset the slow-start flight size 744 * as it may depend on the new MSS. 745 */ 746 if (CC_ALGO(tp)->conn_init != NULL) 747 CC_ALGO(tp)->conn_init(&tp->t_ccv); 748 } else { 749 /* 750 * If further retransmissions are still unsuccessful 751 * with a lowered MTU, maybe this isn't a blackhole and 752 * we restore the previous MSS and blackhole detection 753 * flags. 754 */ 755 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 756 (tp->t_rxtshift >= tp->t_blackhole_exit)) { 757 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 758 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 759 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 760 if (tp->t_maxseg < V_tcp_mssdflt) { 761 /* 762 * The MSS is so small we should not 763 * process incoming SACK's since we are 764 * subject to attack in such a case. 765 */ 766 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; 767 } else { 768 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; 769 } 770 TCPSTAT_INC(tcps_pmtud_blackhole_failed); 771 /* 772 * Reset the slow-start flight size as it 773 * may depend on the new MSS. 774 */ 775 if (CC_ALGO(tp)->conn_init != NULL) 776 CC_ALGO(tp)->conn_init(&tp->t_ccv); 777 } 778 } 779 } 780 781 /* 782 * Disable RFC1323 and SACK if we haven't got any response to 783 * our third SYN to work-around some broken terminal servers 784 * (most of which have hopefully been retired) that have bad VJ 785 * header compression code which trashes TCP segments containing 786 * unknown-to-them TCP options. 787 */ 788 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 789 (tp->t_rxtshift == 3)) 790 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 791 /* 792 * If we backed off this far, notify the L3 protocol that we're having 793 * connection problems. 794 */ 795 if (tp->t_rxtshift > TCP_RTT_INVALIDATE) { 796 #ifdef INET6 797 if ((inp->inp_vflag & INP_IPV6) != 0) 798 in6_losing(inp); 799 else 800 #endif 801 in_losing(inp); 802 } 803 tp->snd_nxt = tp->snd_una; 804 tp->snd_recover = tp->snd_max; 805 /* 806 * Force a segment to be sent. 807 */ 808 tp->t_flags |= TF_ACKNOW; 809 /* 810 * If timing a segment in this window, stop the timer. 811 */ 812 tp->t_rtttime = 0; 813 814 /* Do not overwrite the snd_cwnd on SYN retransmissions. */ 815 if (tp->t_state != TCPS_SYN_SENT) 816 cc_cong_signal(tp, NULL, CC_RTO); 817 NET_EPOCH_ENTER(et); 818 rv = tcp_output_locked(tp); 819 NET_EPOCH_EXIT(et); 820 CURVNET_RESTORE(); 821 822 return (rv); 823 } 824 825 static void 826 tcp_bblog_timer(struct tcpcb *tp, tt_which which, tt_what what, uint32_t ticks) 827 { 828 struct tcp_log_buffer *lgb; 829 uint64_t ms; 830 831 INP_WLOCK_ASSERT(tptoinpcb(tp)); 832 if (tcp_bblogging_on(tp)) 833 lgb = tcp_log_event(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0, 834 NULL, false, NULL, NULL, 0, NULL); 835 else 836 lgb = NULL; 837 if (lgb != NULL) { 838 lgb->tlb_flex1 = (what << 8) | which; 839 if (what == TT_STARTING) { 840 /* Convert ticks to ms and store it in tlb_flex2. */ 841 if (hz == 1000) 842 lgb->tlb_flex2 = ticks; 843 else { 844 ms = (((uint64_t)ticks * 1000) + (hz - 1)) / hz; 845 if (ms > UINT32_MAX) 846 lgb->tlb_flex2 = UINT32_MAX; 847 else 848 lgb->tlb_flex2 = (uint32_t)ms; 849 } 850 } 851 } 852 } 853 854 static inline tt_which 855 tcp_timer_next(struct tcpcb *tp, sbintime_t *precision) 856 { 857 tt_which i, rv; 858 sbintime_t after, before; 859 860 for (i = 0, rv = TT_N, after = before = SBT_MAX; i < TT_N; i++) { 861 if (tp->t_timers[i] < after) { 862 after = tp->t_timers[i]; 863 rv = i; 864 } 865 before = MIN(before, tp->t_timers[i] + tp->t_precisions[i]); 866 } 867 if (precision != NULL) 868 *precision = before - after; 869 870 return (rv); 871 } 872 873 static void 874 tcp_timer_enter(void *xtp) 875 { 876 struct tcpcb *tp = xtp; 877 struct inpcb *inp = tptoinpcb(tp); 878 sbintime_t precision; 879 tt_which which; 880 881 INP_WLOCK_ASSERT(inp); 882 883 which = tcp_timer_next(tp, NULL); 884 MPASS(which < TT_N); 885 tp->t_timers[which] = SBT_MAX; 886 tp->t_precisions[which] = 0; 887 888 tcp_bblog_timer(tp, which, TT_PROCESSING, 0); 889 if (tcp_timersw[which](tp)) { 890 tcp_bblog_timer(tp, which, TT_PROCESSED, 0); 891 if ((which = tcp_timer_next(tp, &precision)) != TT_N) { 892 MPASS(tp->t_state > TCPS_CLOSED); 893 callout_reset_sbt_on(&tp->t_callout, 894 tp->t_timers[which], precision, tcp_timer_enter, 895 tp, inp_to_cpuid(inp), C_ABSOLUTE); 896 } 897 INP_WUNLOCK(inp); 898 } 899 } 900 901 /* 902 * Activate or stop (delta == 0) a TCP timer. 903 */ 904 void 905 tcp_timer_activate(struct tcpcb *tp, tt_which which, u_int delta) 906 { 907 struct inpcb *inp = tptoinpcb(tp); 908 sbintime_t precision; 909 tt_what what; 910 911 #ifdef TCP_OFFLOAD 912 if (tp->t_flags & TF_TOE) 913 return; 914 #endif 915 916 INP_WLOCK_ASSERT(inp); 917 MPASS(tp->t_state > TCPS_CLOSED); 918 919 if (delta > 0) { 920 what = TT_STARTING; 921 callout_when(tick_sbt * delta, 0, C_HARDCLOCK, 922 &tp->t_timers[which], &tp->t_precisions[which]); 923 } else { 924 what = TT_STOPPING; 925 tp->t_timers[which] = SBT_MAX; 926 } 927 tcp_bblog_timer(tp, which, what, delta); 928 929 if ((which = tcp_timer_next(tp, &precision)) != TT_N) 930 callout_reset_sbt_on(&tp->t_callout, tp->t_timers[which], 931 precision, tcp_timer_enter, tp, inp_to_cpuid(inp), 932 C_ABSOLUTE); 933 else 934 callout_stop(&tp->t_callout); 935 } 936 937 bool 938 tcp_timer_active(struct tcpcb *tp, tt_which which) 939 { 940 941 INP_WLOCK_ASSERT(tptoinpcb(tp)); 942 943 return (tp->t_timers[which] != SBT_MAX); 944 } 945 946 /* 947 * Stop all timers associated with tcpcb. 948 * Called when tcpcb moves to TCPS_CLOSED. 949 */ 950 void 951 tcp_timer_stop(struct tcpcb *tp) 952 { 953 954 INP_WLOCK_ASSERT(tptoinpcb(tp)); 955 956 /* 957 * We don't check return value from callout_stop(). There are two 958 * reasons why it can return 0. First, a legitimate one: we could have 959 * been called from the callout itself. Second, callout(9) has a bug. 960 * It can race internally in softclock_call_cc(), when callout has 961 * already completed, but cc_exec_curr still points at the callout. 962 */ 963 (void )callout_stop(&tp->t_callout); 964 /* 965 * In case of being called from callout itself, we must make sure that 966 * we don't reschedule. 967 */ 968 for (tt_which i = 0; i < TT_N; i++) 969 tp->t_timers[i] = SBT_MAX; 970 } 971