1 /*- 2 * Copyright (c) 2016-2019 3 * Netflix Inc. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 #include "opt_ipsec.h" 34 #include "opt_tcpdebug.h" 35 36 #include <sys/param.h> 37 #include <sys/module.h> 38 #include <sys/kernel.h> 39 #ifdef TCP_HHOOK 40 #include <sys/hhook.h> 41 #endif 42 #include <sys/lock.h> 43 #include <sys/malloc.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/mbuf.h> 47 #include <sys/proc.h> /* for proc0 declaration */ 48 #ifdef NETFLIX_STATS 49 #include <sys/qmath.h> 50 #endif 51 #include <sys/socket.h> 52 #include <sys/socketvar.h> 53 #include <sys/sysctl.h> 54 #include <sys/systm.h> 55 #include <sys/tree.h> 56 #ifdef NETFLIX_STATS 57 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 58 #endif 59 #include <sys/refcount.h> 60 #include <sys/queue.h> 61 #include <sys/smp.h> 62 #include <sys/kthread.h> 63 #include <sys/kern_prefetch.h> 64 65 #include <vm/uma.h> 66 67 #include <net/route.h> 68 #include <net/vnet.h> 69 70 #define TCPSTATES /* for logging */ 71 72 #include <netinet/in.h> 73 #include <netinet/in_kdtrace.h> 74 #include <netinet/in_pcb.h> 75 #include <netinet/ip.h> 76 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 77 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 78 #include <netinet/ip_var.h> 79 #include <netinet/ip6.h> 80 #include <netinet6/in6_pcb.h> 81 #include <netinet6/ip6_var.h> 82 #define TCPOUTFLAGS 83 #include <netinet/tcp.h> 84 #include <netinet/tcp_fsm.h> 85 #include <netinet/tcp_log_buf.h> 86 #include <netinet/tcp_seq.h> 87 #include <netinet/tcp_timer.h> 88 #include <netinet/tcp_var.h> 89 #include <netinet/tcp_hpts.h> 90 #include <netinet/tcpip.h> 91 #include <netinet/cc/cc.h> 92 #include <netinet/tcp_fastopen.h> 93 #ifdef TCPDEBUG 94 #include <netinet/tcp_debug.h> 95 #endif /* TCPDEBUG */ 96 #ifdef TCP_OFFLOAD 97 #include <netinet/tcp_offload.h> 98 #endif 99 #ifdef INET6 100 #include <netinet6/tcp6_var.h> 101 #endif 102 103 #include <netipsec/ipsec_support.h> 104 105 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 106 #include <netipsec/ipsec.h> 107 #include <netipsec/ipsec6.h> 108 #endif /* IPSEC */ 109 110 #include <netinet/udp.h> 111 #include <netinet/udp_var.h> 112 #include <machine/in_cksum.h> 113 114 #ifdef MAC 115 #include <security/mac/mac_framework.h> 116 #endif 117 #include "sack_filter.h" 118 #include "tcp_rack.h" 119 #include "rack_bbr_common.h" 120 121 uma_zone_t rack_zone; 122 uma_zone_t rack_pcb_zone; 123 124 #ifndef TICKS2SBT 125 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 126 #endif 127 128 struct sysctl_ctx_list rack_sysctl_ctx; 129 struct sysctl_oid *rack_sysctl_root; 130 131 #define CUM_ACKED 1 132 #define SACKED 2 133 134 /* 135 * The RACK module incorporates a number of 136 * TCP ideas that have been put out into the IETF 137 * over the last few years: 138 * - Matt Mathis's Rate Halving which slowly drops 139 * the congestion window so that the ack clock can 140 * be maintained during a recovery. 141 * - Yuchung Cheng's RACK TCP (for which its named) that 142 * will stop us using the number of dup acks and instead 143 * use time as the gage of when we retransmit. 144 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 145 * of Dukkipati et.al. 146 * RACK depends on SACK, so if an endpoint arrives that 147 * cannot do SACK the state machine below will shuttle the 148 * connection back to using the "default" TCP stack that is 149 * in FreeBSD. 150 * 151 * To implement RACK the original TCP stack was first decomposed 152 * into a functional state machine with individual states 153 * for each of the possible TCP connection states. The do_segement 154 * functions role in life is to mandate the connection supports SACK 155 * initially and then assure that the RACK state matches the conenction 156 * state before calling the states do_segment function. Each 157 * state is simplified due to the fact that the original do_segment 158 * has been decomposed and we *know* what state we are in (no 159 * switches on the state) and all tests for SACK are gone. This 160 * greatly simplifies what each state does. 161 * 162 * TCP output is also over-written with a new version since it 163 * must maintain the new rack scoreboard. 164 * 165 */ 166 static int32_t rack_precache = 1; 167 static int32_t rack_tlp_thresh = 1; 168 static int32_t rack_reorder_thresh = 2; 169 static int32_t rack_reorder_fade = 60000; /* 0 - never fade, def 60,000 170 * - 60 seconds */ 171 static int32_t rack_pkt_delay = 1; 172 static int32_t rack_inc_var = 0;/* For TLP */ 173 static int32_t rack_reduce_largest_on_idle = 0; 174 static int32_t rack_min_pace_time = 0; 175 static int32_t rack_min_pace_time_seg_req=6; 176 static int32_t rack_early_recovery = 1; 177 static int32_t rack_early_recovery_max_seg = 6; 178 static int32_t rack_send_a_lot_in_prr = 1; 179 static int32_t rack_min_to = 1; /* Number of ms minimum timeout */ 180 static int32_t rack_tlp_in_recovery = 1; /* Can we do TLP in recovery? */ 181 static int32_t rack_verbose_logging = 0; 182 static int32_t rack_ignore_data_after_close = 1; 183 static int32_t rack_map_entries_limit = 1024; 184 static int32_t rack_map_split_limit = 256; 185 186 /* 187 * Currently regular tcp has a rto_min of 30ms 188 * the backoff goes 12 times so that ends up 189 * being a total of 122.850 seconds before a 190 * connection is killed. 191 */ 192 static int32_t rack_tlp_min = 10; 193 static int32_t rack_rto_min = 30; /* 30ms same as main freebsd */ 194 static int32_t rack_rto_max = 30000; /* 30 seconds */ 195 static const int32_t rack_free_cache = 2; 196 static int32_t rack_hptsi_segments = 40; 197 static int32_t rack_rate_sample_method = USE_RTT_LOW; 198 static int32_t rack_pace_every_seg = 1; 199 static int32_t rack_delayed_ack_time = 200; /* 200ms */ 200 static int32_t rack_slot_reduction = 4; 201 static int32_t rack_lower_cwnd_at_tlp = 0; 202 static int32_t rack_use_proportional_reduce = 0; 203 static int32_t rack_proportional_rate = 10; 204 static int32_t rack_tlp_max_resend = 2; 205 static int32_t rack_limited_retran = 0; 206 static int32_t rack_always_send_oldest = 0; 207 static int32_t rack_sack_block_limit = 128; 208 static int32_t rack_use_sack_filter = 1; 209 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 210 211 /* Rack specific counters */ 212 counter_u64_t rack_badfr; 213 counter_u64_t rack_badfr_bytes; 214 counter_u64_t rack_rtm_prr_retran; 215 counter_u64_t rack_rtm_prr_newdata; 216 counter_u64_t rack_timestamp_mismatch; 217 counter_u64_t rack_reorder_seen; 218 counter_u64_t rack_paced_segments; 219 counter_u64_t rack_unpaced_segments; 220 counter_u64_t rack_saw_enobuf; 221 counter_u64_t rack_saw_enetunreach; 222 223 /* Tail loss probe counters */ 224 counter_u64_t rack_tlp_tot; 225 counter_u64_t rack_tlp_newdata; 226 counter_u64_t rack_tlp_retran; 227 counter_u64_t rack_tlp_retran_bytes; 228 counter_u64_t rack_tlp_retran_fail; 229 counter_u64_t rack_to_tot; 230 counter_u64_t rack_to_arm_rack; 231 counter_u64_t rack_to_arm_tlp; 232 counter_u64_t rack_to_alloc; 233 counter_u64_t rack_to_alloc_hard; 234 counter_u64_t rack_to_alloc_emerg; 235 counter_u64_t rack_to_alloc_limited; 236 counter_u64_t rack_alloc_limited_conns; 237 counter_u64_t rack_split_limited; 238 239 counter_u64_t rack_sack_proc_all; 240 counter_u64_t rack_sack_proc_short; 241 counter_u64_t rack_sack_proc_restart; 242 counter_u64_t rack_runt_sacks; 243 counter_u64_t rack_used_tlpmethod; 244 counter_u64_t rack_used_tlpmethod2; 245 counter_u64_t rack_enter_tlp_calc; 246 counter_u64_t rack_input_idle_reduces; 247 counter_u64_t rack_tlp_does_nada; 248 249 /* Temp CPU counters */ 250 counter_u64_t rack_find_high; 251 252 counter_u64_t rack_progress_drops; 253 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 254 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 255 256 /* 257 * This was originally defined in tcp_timer.c, but is now reproduced here given 258 * the unification of the SYN and non-SYN retransmit timer exponents combined 259 * with wanting to retain previous behaviour for previously deployed stack 260 * versions. 261 */ 262 int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] = 263 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 }; 264 265 static void 266 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 267 268 static int 269 rack_process_ack(struct mbuf *m, struct tcphdr *th, 270 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 271 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 272 static int 273 rack_process_data(struct mbuf *m, struct tcphdr *th, 274 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 275 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 276 static void 277 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 278 struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery); 279 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 280 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 281 uint8_t limit_type); 282 static struct rack_sendmap * 283 rack_check_recovery_mode(struct tcpcb *tp, 284 uint32_t tsused); 285 static void 286 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, 287 uint32_t type); 288 static void rack_counter_destroy(void); 289 static int 290 rack_ctloutput(struct socket *so, struct sockopt *sopt, 291 struct inpcb *inp, struct tcpcb *tp); 292 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 293 static void 294 rack_do_segment(struct mbuf *m, struct tcphdr *th, 295 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 296 uint8_t iptos); 297 static void rack_dtor(void *mem, int32_t size, void *arg); 298 static void 299 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm, 300 uint32_t t, uint32_t cts); 301 static struct rack_sendmap * 302 rack_find_high_nonack(struct tcp_rack *rack, 303 struct rack_sendmap *rsm); 304 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 305 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 306 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 307 static int 308 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 309 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 310 static int32_t rack_handoff_ok(struct tcpcb *tp); 311 static int32_t rack_init(struct tcpcb *tp); 312 static void rack_init_sysctls(void); 313 static void 314 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 315 struct tcphdr *th); 316 static void 317 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 318 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts, 319 uint8_t pass, struct rack_sendmap *hintrsm); 320 static void 321 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 322 struct rack_sendmap *rsm); 323 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num); 324 static int32_t rack_output(struct tcpcb *tp); 325 static void 326 rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th, 327 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 328 uint8_t iptos, int32_t nxt_pkt, struct timeval *tv); 329 330 static uint32_t 331 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 332 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 333 uint32_t cts); 334 static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th); 335 static void rack_remxt_tmr(struct tcpcb *tp); 336 static int 337 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 338 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 339 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 340 static int32_t rack_stopall(struct tcpcb *tp); 341 static void 342 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 343 uint32_t delta); 344 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 345 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 346 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type); 347 static uint32_t 348 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 349 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp); 350 static void 351 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 352 struct rack_sendmap *rsm, uint32_t ts); 353 static int 354 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 355 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type); 356 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 357 static void 358 rack_challenge_ack(struct mbuf *m, struct tcphdr *th, 359 struct tcpcb *tp, int32_t * ret_val); 360 static int 361 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 362 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 363 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 364 static int 365 rack_do_closing(struct mbuf *m, struct tcphdr *th, 366 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 367 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 368 static void rack_do_drop(struct mbuf *m, struct tcpcb *tp); 369 static void 370 rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp, 371 struct tcphdr *th, int32_t thflags, int32_t tlen, int32_t * ret_val); 372 static void 373 rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, 374 struct tcphdr *th, int32_t rstreason, int32_t tlen); 375 static int 376 rack_do_established(struct mbuf *m, struct tcphdr *th, 377 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 378 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 379 static int 380 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 381 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 382 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt); 383 static int 384 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 385 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 386 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 387 static int 388 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 389 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 390 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 391 static int 392 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 393 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 394 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 395 static int 396 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 397 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 398 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 399 static int 400 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 401 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 402 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 403 static int 404 rack_drop_checks(struct tcpopt *to, struct mbuf *m, 405 struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * thf, 406 int32_t * drop_hdrlen, int32_t * ret_val); 407 static int 408 rack_process_rst(struct mbuf *m, struct tcphdr *th, 409 struct socket *so, struct tcpcb *tp); 410 struct rack_sendmap * 411 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 412 uint32_t tsused); 413 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt); 414 static void 415 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th); 416 417 static int 418 rack_ts_check(struct mbuf *m, struct tcphdr *th, 419 struct tcpcb *tp, int32_t tlen, int32_t thflags, int32_t * ret_val); 420 421 int32_t rack_clear_counter=0; 422 423 424 static int 425 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 426 { 427 uint32_t stat; 428 int32_t error; 429 430 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 431 if (error || req->newptr == NULL) 432 return error; 433 434 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 435 if (error) 436 return (error); 437 if (stat == 1) { 438 #ifdef INVARIANTS 439 printf("Clearing RACK counters\n"); 440 #endif 441 counter_u64_zero(rack_badfr); 442 counter_u64_zero(rack_badfr_bytes); 443 counter_u64_zero(rack_rtm_prr_retran); 444 counter_u64_zero(rack_rtm_prr_newdata); 445 counter_u64_zero(rack_timestamp_mismatch); 446 counter_u64_zero(rack_reorder_seen); 447 counter_u64_zero(rack_tlp_tot); 448 counter_u64_zero(rack_tlp_newdata); 449 counter_u64_zero(rack_tlp_retran); 450 counter_u64_zero(rack_tlp_retran_bytes); 451 counter_u64_zero(rack_tlp_retran_fail); 452 counter_u64_zero(rack_to_tot); 453 counter_u64_zero(rack_to_arm_rack); 454 counter_u64_zero(rack_to_arm_tlp); 455 counter_u64_zero(rack_paced_segments); 456 counter_u64_zero(rack_unpaced_segments); 457 counter_u64_zero(rack_saw_enobuf); 458 counter_u64_zero(rack_saw_enetunreach); 459 counter_u64_zero(rack_to_alloc_hard); 460 counter_u64_zero(rack_to_alloc_emerg); 461 counter_u64_zero(rack_sack_proc_all); 462 counter_u64_zero(rack_sack_proc_short); 463 counter_u64_zero(rack_sack_proc_restart); 464 counter_u64_zero(rack_to_alloc); 465 counter_u64_zero(rack_to_alloc_limited); 466 counter_u64_zero(rack_alloc_limited_conns); 467 counter_u64_zero(rack_split_limited); 468 counter_u64_zero(rack_find_high); 469 counter_u64_zero(rack_runt_sacks); 470 counter_u64_zero(rack_used_tlpmethod); 471 counter_u64_zero(rack_used_tlpmethod2); 472 counter_u64_zero(rack_enter_tlp_calc); 473 counter_u64_zero(rack_progress_drops); 474 counter_u64_zero(rack_tlp_does_nada); 475 } 476 rack_clear_counter = 0; 477 return (0); 478 } 479 480 481 482 static void 483 rack_init_sysctls() 484 { 485 SYSCTL_ADD_S32(&rack_sysctl_ctx, 486 SYSCTL_CHILDREN(rack_sysctl_root), 487 OID_AUTO, "map_limit", CTLFLAG_RW, 488 &rack_map_entries_limit , 1024, 489 "Is there a limit on how big the sendmap can grow? "); 490 491 SYSCTL_ADD_S32(&rack_sysctl_ctx, 492 SYSCTL_CHILDREN(rack_sysctl_root), 493 OID_AUTO, "map_splitlimit", CTLFLAG_RW, 494 &rack_map_split_limit , 256, 495 "Is there a limit on how much splitting a peer can do?"); 496 497 SYSCTL_ADD_S32(&rack_sysctl_ctx, 498 SYSCTL_CHILDREN(rack_sysctl_root), 499 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 500 &rack_rate_sample_method , USE_RTT_LOW, 501 "What method should we use for rate sampling 0=high, 1=low "); 502 SYSCTL_ADD_S32(&rack_sysctl_ctx, 503 SYSCTL_CHILDREN(rack_sysctl_root), 504 OID_AUTO, "data_after_close", CTLFLAG_RW, 505 &rack_ignore_data_after_close, 0, 506 "Do we hold off sending a RST until all pending data is ack'd"); 507 SYSCTL_ADD_S32(&rack_sysctl_ctx, 508 SYSCTL_CHILDREN(rack_sysctl_root), 509 OID_AUTO, "tlpmethod", CTLFLAG_RW, 510 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 511 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 512 SYSCTL_ADD_S32(&rack_sysctl_ctx, 513 SYSCTL_CHILDREN(rack_sysctl_root), 514 OID_AUTO, "min_pace_time", CTLFLAG_RW, 515 &rack_min_pace_time, 0, 516 "Should we enforce a minimum pace time of 1ms"); 517 SYSCTL_ADD_S32(&rack_sysctl_ctx, 518 SYSCTL_CHILDREN(rack_sysctl_root), 519 OID_AUTO, "min_pace_segs", CTLFLAG_RW, 520 &rack_min_pace_time_seg_req, 6, 521 "How many segments have to be in the len to enforce min-pace-time"); 522 SYSCTL_ADD_S32(&rack_sysctl_ctx, 523 SYSCTL_CHILDREN(rack_sysctl_root), 524 OID_AUTO, "idle_reduce_high", CTLFLAG_RW, 525 &rack_reduce_largest_on_idle, 0, 526 "Should we reduce the largest cwnd seen to IW on idle reduction"); 527 SYSCTL_ADD_S32(&rack_sysctl_ctx, 528 SYSCTL_CHILDREN(rack_sysctl_root), 529 OID_AUTO, "bb_verbose", CTLFLAG_RW, 530 &rack_verbose_logging, 0, 531 "Should RACK black box logging be verbose"); 532 SYSCTL_ADD_S32(&rack_sysctl_ctx, 533 SYSCTL_CHILDREN(rack_sysctl_root), 534 OID_AUTO, "sackfiltering", CTLFLAG_RW, 535 &rack_use_sack_filter, 1, 536 "Do we use sack filtering?"); 537 SYSCTL_ADD_S32(&rack_sysctl_ctx, 538 SYSCTL_CHILDREN(rack_sysctl_root), 539 OID_AUTO, "delayed_ack", CTLFLAG_RW, 540 &rack_delayed_ack_time, 200, 541 "Delayed ack time (200ms)"); 542 SYSCTL_ADD_S32(&rack_sysctl_ctx, 543 SYSCTL_CHILDREN(rack_sysctl_root), 544 OID_AUTO, "tlpminto", CTLFLAG_RW, 545 &rack_tlp_min, 10, 546 "TLP minimum timeout per the specification (10ms)"); 547 SYSCTL_ADD_S32(&rack_sysctl_ctx, 548 SYSCTL_CHILDREN(rack_sysctl_root), 549 OID_AUTO, "precache", CTLFLAG_RW, 550 &rack_precache, 0, 551 "Where should we precache the mcopy (0 is not at all)"); 552 SYSCTL_ADD_S32(&rack_sysctl_ctx, 553 SYSCTL_CHILDREN(rack_sysctl_root), 554 OID_AUTO, "sblklimit", CTLFLAG_RW, 555 &rack_sack_block_limit, 128, 556 "When do we start paying attention to small sack blocks"); 557 SYSCTL_ADD_S32(&rack_sysctl_ctx, 558 SYSCTL_CHILDREN(rack_sysctl_root), 559 OID_AUTO, "send_oldest", CTLFLAG_RW, 560 &rack_always_send_oldest, 1, 561 "Should we always send the oldest TLP and RACK-TLP"); 562 SYSCTL_ADD_S32(&rack_sysctl_ctx, 563 SYSCTL_CHILDREN(rack_sysctl_root), 564 OID_AUTO, "rack_tlp_in_recovery", CTLFLAG_RW, 565 &rack_tlp_in_recovery, 1, 566 "Can we do a TLP during recovery?"); 567 SYSCTL_ADD_S32(&rack_sysctl_ctx, 568 SYSCTL_CHILDREN(rack_sysctl_root), 569 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 570 &rack_limited_retran, 0, 571 "How many times can a rack timeout drive out sends"); 572 SYSCTL_ADD_S32(&rack_sysctl_ctx, 573 SYSCTL_CHILDREN(rack_sysctl_root), 574 OID_AUTO, "minrto", CTLFLAG_RW, 575 &rack_rto_min, 0, 576 "Minimum RTO in ms -- set with caution below 1000 due to TLP"); 577 SYSCTL_ADD_S32(&rack_sysctl_ctx, 578 SYSCTL_CHILDREN(rack_sysctl_root), 579 OID_AUTO, "maxrto", CTLFLAG_RW, 580 &rack_rto_max, 0, 581 "Maxiumum RTO in ms -- should be at least as large as min_rto"); 582 SYSCTL_ADD_S32(&rack_sysctl_ctx, 583 SYSCTL_CHILDREN(rack_sysctl_root), 584 OID_AUTO, "tlp_retry", CTLFLAG_RW, 585 &rack_tlp_max_resend, 2, 586 "How many times does TLP retry a single segment or multiple with no ACK"); 587 SYSCTL_ADD_S32(&rack_sysctl_ctx, 588 SYSCTL_CHILDREN(rack_sysctl_root), 589 OID_AUTO, "recovery_loss_prop", CTLFLAG_RW, 590 &rack_use_proportional_reduce, 0, 591 "Should we proportionaly reduce cwnd based on the number of losses "); 592 SYSCTL_ADD_S32(&rack_sysctl_ctx, 593 SYSCTL_CHILDREN(rack_sysctl_root), 594 OID_AUTO, "recovery_prop", CTLFLAG_RW, 595 &rack_proportional_rate, 10, 596 "What percent reduction per loss"); 597 SYSCTL_ADD_S32(&rack_sysctl_ctx, 598 SYSCTL_CHILDREN(rack_sysctl_root), 599 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 600 &rack_lower_cwnd_at_tlp, 0, 601 "When a TLP completes a retran should we enter recovery?"); 602 SYSCTL_ADD_S32(&rack_sysctl_ctx, 603 SYSCTL_CHILDREN(rack_sysctl_root), 604 OID_AUTO, "hptsi_reduces", CTLFLAG_RW, 605 &rack_slot_reduction, 4, 606 "When setting a slot should we reduce by divisor"); 607 SYSCTL_ADD_S32(&rack_sysctl_ctx, 608 SYSCTL_CHILDREN(rack_sysctl_root), 609 OID_AUTO, "hptsi_every_seg", CTLFLAG_RW, 610 &rack_pace_every_seg, 1, 611 "Should we pace out every segment hptsi"); 612 SYSCTL_ADD_S32(&rack_sysctl_ctx, 613 SYSCTL_CHILDREN(rack_sysctl_root), 614 OID_AUTO, "hptsi_seg_max", CTLFLAG_RW, 615 &rack_hptsi_segments, 6, 616 "Should we pace out only a limited size of segments"); 617 SYSCTL_ADD_S32(&rack_sysctl_ctx, 618 SYSCTL_CHILDREN(rack_sysctl_root), 619 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 620 &rack_send_a_lot_in_prr, 1, 621 "Send a lot in prr"); 622 SYSCTL_ADD_S32(&rack_sysctl_ctx, 623 SYSCTL_CHILDREN(rack_sysctl_root), 624 OID_AUTO, "minto", CTLFLAG_RW, 625 &rack_min_to, 1, 626 "Minimum rack timeout in milliseconds"); 627 SYSCTL_ADD_S32(&rack_sysctl_ctx, 628 SYSCTL_CHILDREN(rack_sysctl_root), 629 OID_AUTO, "earlyrecoveryseg", CTLFLAG_RW, 630 &rack_early_recovery_max_seg, 6, 631 "Max segments in early recovery"); 632 SYSCTL_ADD_S32(&rack_sysctl_ctx, 633 SYSCTL_CHILDREN(rack_sysctl_root), 634 OID_AUTO, "earlyrecovery", CTLFLAG_RW, 635 &rack_early_recovery, 1, 636 "Do we do early recovery with rack"); 637 SYSCTL_ADD_S32(&rack_sysctl_ctx, 638 SYSCTL_CHILDREN(rack_sysctl_root), 639 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 640 &rack_reorder_thresh, 2, 641 "What factor for rack will be added when seeing reordering (shift right)"); 642 SYSCTL_ADD_S32(&rack_sysctl_ctx, 643 SYSCTL_CHILDREN(rack_sysctl_root), 644 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 645 &rack_tlp_thresh, 1, 646 "what divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 647 SYSCTL_ADD_S32(&rack_sysctl_ctx, 648 SYSCTL_CHILDREN(rack_sysctl_root), 649 OID_AUTO, "reorder_fade", CTLFLAG_RW, 650 &rack_reorder_fade, 0, 651 "Does reorder detection fade, if so how many ms (0 means never)"); 652 SYSCTL_ADD_S32(&rack_sysctl_ctx, 653 SYSCTL_CHILDREN(rack_sysctl_root), 654 OID_AUTO, "pktdelay", CTLFLAG_RW, 655 &rack_pkt_delay, 1, 656 "Extra RACK time (in ms) besides reordering thresh"); 657 SYSCTL_ADD_S32(&rack_sysctl_ctx, 658 SYSCTL_CHILDREN(rack_sysctl_root), 659 OID_AUTO, "inc_var", CTLFLAG_RW, 660 &rack_inc_var, 0, 661 "Should rack add to the TLP timer the variance in rtt calculation"); 662 rack_badfr = counter_u64_alloc(M_WAITOK); 663 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 664 SYSCTL_CHILDREN(rack_sysctl_root), 665 OID_AUTO, "badfr", CTLFLAG_RD, 666 &rack_badfr, "Total number of bad FRs"); 667 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 668 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 669 SYSCTL_CHILDREN(rack_sysctl_root), 670 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 671 &rack_badfr_bytes, "Total number of bad FRs"); 672 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 674 SYSCTL_CHILDREN(rack_sysctl_root), 675 OID_AUTO, "prrsndret", CTLFLAG_RD, 676 &rack_rtm_prr_retran, 677 "Total number of prr based retransmits"); 678 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 680 SYSCTL_CHILDREN(rack_sysctl_root), 681 OID_AUTO, "prrsndnew", CTLFLAG_RD, 682 &rack_rtm_prr_newdata, 683 "Total number of prr based new transmits"); 684 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 686 SYSCTL_CHILDREN(rack_sysctl_root), 687 OID_AUTO, "tsnf", CTLFLAG_RD, 688 &rack_timestamp_mismatch, 689 "Total number of timestamps that we could not find the reported ts"); 690 rack_find_high = counter_u64_alloc(M_WAITOK); 691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 692 SYSCTL_CHILDREN(rack_sysctl_root), 693 OID_AUTO, "findhigh", CTLFLAG_RD, 694 &rack_find_high, 695 "Total number of FIN causing find-high"); 696 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 698 SYSCTL_CHILDREN(rack_sysctl_root), 699 OID_AUTO, "reordering", CTLFLAG_RD, 700 &rack_reorder_seen, 701 "Total number of times we added delay due to reordering"); 702 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 704 SYSCTL_CHILDREN(rack_sysctl_root), 705 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 706 &rack_tlp_tot, 707 "Total number of tail loss probe expirations"); 708 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 709 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 710 SYSCTL_CHILDREN(rack_sysctl_root), 711 OID_AUTO, "tlp_new", CTLFLAG_RD, 712 &rack_tlp_newdata, 713 "Total number of tail loss probe sending new data"); 714 715 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 716 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 717 SYSCTL_CHILDREN(rack_sysctl_root), 718 OID_AUTO, "tlp_retran", CTLFLAG_RD, 719 &rack_tlp_retran, 720 "Total number of tail loss probe sending retransmitted data"); 721 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 722 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 723 SYSCTL_CHILDREN(rack_sysctl_root), 724 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 725 &rack_tlp_retran_bytes, 726 "Total bytes of tail loss probe sending retransmitted data"); 727 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 728 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 729 SYSCTL_CHILDREN(rack_sysctl_root), 730 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 731 &rack_tlp_retran_fail, 732 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 733 rack_to_tot = counter_u64_alloc(M_WAITOK); 734 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 735 SYSCTL_CHILDREN(rack_sysctl_root), 736 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 737 &rack_to_tot, 738 "Total number of times the rack to expired?"); 739 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 740 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 741 SYSCTL_CHILDREN(rack_sysctl_root), 742 OID_AUTO, "arm_rack", CTLFLAG_RD, 743 &rack_to_arm_rack, 744 "Total number of times the rack timer armed?"); 745 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 746 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 747 SYSCTL_CHILDREN(rack_sysctl_root), 748 OID_AUTO, "arm_tlp", CTLFLAG_RD, 749 &rack_to_arm_tlp, 750 "Total number of times the tlp timer armed?"); 751 rack_paced_segments = counter_u64_alloc(M_WAITOK); 752 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 753 SYSCTL_CHILDREN(rack_sysctl_root), 754 OID_AUTO, "paced", CTLFLAG_RD, 755 &rack_paced_segments, 756 "Total number of times a segment send caused hptsi"); 757 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 758 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 759 SYSCTL_CHILDREN(rack_sysctl_root), 760 OID_AUTO, "unpaced", CTLFLAG_RD, 761 &rack_unpaced_segments, 762 "Total number of times a segment did not cause hptsi"); 763 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 764 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 765 SYSCTL_CHILDREN(rack_sysctl_root), 766 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 767 &rack_saw_enobuf, 768 "Total number of times a segment did not cause hptsi"); 769 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 770 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 771 SYSCTL_CHILDREN(rack_sysctl_root), 772 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 773 &rack_saw_enetunreach, 774 "Total number of times a segment did not cause hptsi"); 775 rack_to_alloc = counter_u64_alloc(M_WAITOK); 776 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 777 SYSCTL_CHILDREN(rack_sysctl_root), 778 OID_AUTO, "allocs", CTLFLAG_RD, 779 &rack_to_alloc, 780 "Total allocations of tracking structures"); 781 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 783 SYSCTL_CHILDREN(rack_sysctl_root), 784 OID_AUTO, "allochard", CTLFLAG_RD, 785 &rack_to_alloc_hard, 786 "Total allocations done with sleeping the hard way"); 787 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 788 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 789 SYSCTL_CHILDREN(rack_sysctl_root), 790 OID_AUTO, "allocemerg", CTLFLAG_RD, 791 &rack_to_alloc_emerg, 792 "Total allocations done from emergency cache"); 793 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 794 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 795 SYSCTL_CHILDREN(rack_sysctl_root), 796 OID_AUTO, "alloc_limited", CTLFLAG_RD, 797 &rack_to_alloc_limited, 798 "Total allocations dropped due to limit"); 799 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 800 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 801 SYSCTL_CHILDREN(rack_sysctl_root), 802 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 803 &rack_alloc_limited_conns, 804 "Connections with allocations dropped due to limit"); 805 rack_split_limited = counter_u64_alloc(M_WAITOK); 806 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 807 SYSCTL_CHILDREN(rack_sysctl_root), 808 OID_AUTO, "split_limited", CTLFLAG_RD, 809 &rack_split_limited, 810 "Split allocations dropped due to limit"); 811 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 812 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 813 SYSCTL_CHILDREN(rack_sysctl_root), 814 OID_AUTO, "sack_long", CTLFLAG_RD, 815 &rack_sack_proc_all, 816 "Total times we had to walk whole list for sack processing"); 817 818 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 819 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 820 SYSCTL_CHILDREN(rack_sysctl_root), 821 OID_AUTO, "sack_restart", CTLFLAG_RD, 822 &rack_sack_proc_restart, 823 "Total times we had to walk whole list due to a restart"); 824 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 825 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 826 SYSCTL_CHILDREN(rack_sysctl_root), 827 OID_AUTO, "sack_short", CTLFLAG_RD, 828 &rack_sack_proc_short, 829 "Total times we took shortcut for sack processing"); 830 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK); 831 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 832 SYSCTL_CHILDREN(rack_sysctl_root), 833 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD, 834 &rack_enter_tlp_calc, 835 "Total times we called calc-tlp"); 836 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK); 837 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 838 SYSCTL_CHILDREN(rack_sysctl_root), 839 OID_AUTO, "hit_tlp_method", CTLFLAG_RD, 840 &rack_used_tlpmethod, 841 "Total number of runt sacks"); 842 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK); 843 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 844 SYSCTL_CHILDREN(rack_sysctl_root), 845 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD, 846 &rack_used_tlpmethod2, 847 "Total number of runt sacks 2"); 848 rack_runt_sacks = counter_u64_alloc(M_WAITOK); 849 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 850 SYSCTL_CHILDREN(rack_sysctl_root), 851 OID_AUTO, "runtsacks", CTLFLAG_RD, 852 &rack_runt_sacks, 853 "Total number of runt sacks"); 854 rack_progress_drops = counter_u64_alloc(M_WAITOK); 855 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 856 SYSCTL_CHILDREN(rack_sysctl_root), 857 OID_AUTO, "prog_drops", CTLFLAG_RD, 858 &rack_progress_drops, 859 "Total number of progress drops"); 860 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 861 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 862 SYSCTL_CHILDREN(rack_sysctl_root), 863 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 864 &rack_input_idle_reduces, 865 "Total number of idle reductions on input"); 866 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK); 867 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 868 SYSCTL_CHILDREN(rack_sysctl_root), 869 OID_AUTO, "tlp_nada", CTLFLAG_RD, 870 &rack_tlp_does_nada, 871 "Total number of nada tlp calls"); 872 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 873 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 874 OID_AUTO, "outsize", CTLFLAG_RD, 875 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 876 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 877 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 878 OID_AUTO, "opts", CTLFLAG_RD, 879 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 880 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 881 SYSCTL_CHILDREN(rack_sysctl_root), 882 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 883 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 884 } 885 886 static inline int32_t 887 rack_progress_timeout_check(struct tcpcb *tp) 888 { 889 #ifdef NETFLIX_PROGRESS 890 if (tp->t_maxunacktime && tp->t_acktime && TSTMP_GT(ticks, tp->t_acktime)) { 891 if ((ticks - tp->t_acktime) >= tp->t_maxunacktime) { 892 /* 893 * There is an assumption that the caller 894 * will drop the connection so we will 895 * increment the counters here. 896 */ 897 struct tcp_rack *rack; 898 rack = (struct tcp_rack *)tp->t_fb_ptr; 899 counter_u64_add(rack_progress_drops, 1); 900 TCPSTAT_INC(tcps_progdrops); 901 rack_log_progress_event(rack, tp, ticks, PROGRESS_DROP, __LINE__); 902 return (1); 903 } 904 } 905 #endif 906 return (0); 907 } 908 909 910 static void 911 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 912 { 913 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 914 union tcp_log_stackspecific log; 915 916 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 917 log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT); 918 log.u_bbr.flex2 = to; 919 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 920 log.u_bbr.flex4 = slot; 921 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 922 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 923 log.u_bbr.flex8 = which; 924 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 925 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 926 TCP_LOG_EVENT(rack->rc_tp, NULL, 927 &rack->rc_inp->inp_socket->so_rcv, 928 &rack->rc_inp->inp_socket->so_snd, 929 BBR_LOG_TIMERSTAR, 0, 930 0, &log, false); 931 } 932 } 933 934 static void 935 rack_log_to_event(struct tcp_rack *rack, int32_t to_num) 936 { 937 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 938 union tcp_log_stackspecific log; 939 940 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 941 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 942 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 943 log.u_bbr.flex8 = to_num; 944 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 945 log.u_bbr.flex2 = rack->rc_rack_rtt; 946 TCP_LOG_EVENT(rack->rc_tp, NULL, 947 &rack->rc_inp->inp_socket->so_rcv, 948 &rack->rc_inp->inp_socket->so_snd, 949 BBR_LOG_RTO, 0, 950 0, &log, false); 951 } 952 } 953 954 static void 955 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t, 956 uint32_t o_srtt, uint32_t o_var) 957 { 958 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 959 union tcp_log_stackspecific log; 960 961 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 962 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 963 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 964 log.u_bbr.flex1 = t; 965 log.u_bbr.flex2 = o_srtt; 966 log.u_bbr.flex3 = o_var; 967 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 968 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 969 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt; 970 log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot; 971 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 972 TCP_LOG_EVENT(tp, NULL, 973 &rack->rc_inp->inp_socket->so_rcv, 974 &rack->rc_inp->inp_socket->so_snd, 975 BBR_LOG_BBRRTT, 0, 976 0, &log, false); 977 } 978 } 979 980 static void 981 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 982 { 983 /* 984 * Log the rtt sample we are 985 * applying to the srtt algorithm in 986 * useconds. 987 */ 988 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 989 union tcp_log_stackspecific log; 990 struct timeval tv; 991 992 memset(&log, 0, sizeof(log)); 993 /* Convert our ms to a microsecond */ 994 log.u_bbr.flex1 = rtt * 1000; 995 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 996 TCP_LOG_EVENTP(rack->rc_tp, NULL, 997 &rack->rc_inp->inp_socket->so_rcv, 998 &rack->rc_inp->inp_socket->so_snd, 999 TCP_LOG_RTT, 0, 1000 0, &log, false, &tv); 1001 } 1002 } 1003 1004 1005 static inline void 1006 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 1007 { 1008 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 1009 union tcp_log_stackspecific log; 1010 1011 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1012 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1013 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1014 log.u_bbr.flex1 = line; 1015 log.u_bbr.flex2 = tick; 1016 log.u_bbr.flex3 = tp->t_maxunacktime; 1017 log.u_bbr.flex4 = tp->t_acktime; 1018 log.u_bbr.flex8 = event; 1019 TCP_LOG_EVENT(tp, NULL, 1020 &rack->rc_inp->inp_socket->so_rcv, 1021 &rack->rc_inp->inp_socket->so_snd, 1022 BBR_LOG_PROGRESS, 0, 1023 0, &log, false); 1024 } 1025 } 1026 1027 static void 1028 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts) 1029 { 1030 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1031 union tcp_log_stackspecific log; 1032 1033 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1034 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1035 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1036 log.u_bbr.flex1 = slot; 1037 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 1038 log.u_bbr.flex8 = rack->rc_in_persist; 1039 TCP_LOG_EVENT(rack->rc_tp, NULL, 1040 &rack->rc_inp->inp_socket->so_rcv, 1041 &rack->rc_inp->inp_socket->so_snd, 1042 BBR_LOG_BBRSND, 0, 1043 0, &log, false); 1044 } 1045 } 1046 1047 static void 1048 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out) 1049 { 1050 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1051 union tcp_log_stackspecific log; 1052 1053 memset(&log, 0, sizeof(log)); 1054 log.u_bbr.flex1 = did_out; 1055 log.u_bbr.flex2 = nxt_pkt; 1056 log.u_bbr.flex3 = way_out; 1057 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 1058 log.u_bbr.flex7 = rack->r_wanted_output; 1059 log.u_bbr.flex8 = rack->rc_in_persist; 1060 TCP_LOG_EVENT(rack->rc_tp, NULL, 1061 &rack->rc_inp->inp_socket->so_rcv, 1062 &rack->rc_inp->inp_socket->so_snd, 1063 BBR_LOG_DOSEG_DONE, 0, 1064 0, &log, false); 1065 } 1066 } 1067 1068 1069 static void 1070 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling) 1071 { 1072 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1073 union tcp_log_stackspecific log; 1074 1075 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1076 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1077 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1078 log.u_bbr.flex1 = slot; 1079 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 1080 log.u_bbr.flex7 = hpts_calling; 1081 log.u_bbr.flex8 = rack->rc_in_persist; 1082 TCP_LOG_EVENT(rack->rc_tp, NULL, 1083 &rack->rc_inp->inp_socket->so_rcv, 1084 &rack->rc_inp->inp_socket->so_snd, 1085 BBR_LOG_JUSTRET, 0, 1086 tlen, &log, false); 1087 } 1088 } 1089 1090 static void 1091 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line) 1092 { 1093 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1094 union tcp_log_stackspecific log; 1095 1096 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1097 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1098 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1099 log.u_bbr.flex1 = line; 1100 log.u_bbr.flex2 = 0; 1101 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 1102 log.u_bbr.flex4 = 0; 1103 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 1104 log.u_bbr.flex8 = hpts_removed; 1105 TCP_LOG_EVENT(rack->rc_tp, NULL, 1106 &rack->rc_inp->inp_socket->so_rcv, 1107 &rack->rc_inp->inp_socket->so_snd, 1108 BBR_LOG_TIMERCANC, 0, 1109 0, &log, false); 1110 } 1111 } 1112 1113 static void 1114 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 1115 { 1116 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1117 union tcp_log_stackspecific log; 1118 1119 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1120 log.u_bbr.flex1 = timers; 1121 log.u_bbr.flex2 = ret; 1122 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 1123 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 1124 log.u_bbr.flex5 = cts; 1125 TCP_LOG_EVENT(rack->rc_tp, NULL, 1126 &rack->rc_inp->inp_socket->so_rcv, 1127 &rack->rc_inp->inp_socket->so_snd, 1128 BBR_LOG_TO_PROCESS, 0, 1129 0, &log, false); 1130 } 1131 } 1132 1133 static void 1134 rack_counter_destroy() 1135 { 1136 counter_u64_free(rack_badfr); 1137 counter_u64_free(rack_badfr_bytes); 1138 counter_u64_free(rack_rtm_prr_retran); 1139 counter_u64_free(rack_rtm_prr_newdata); 1140 counter_u64_free(rack_timestamp_mismatch); 1141 counter_u64_free(rack_reorder_seen); 1142 counter_u64_free(rack_tlp_tot); 1143 counter_u64_free(rack_tlp_newdata); 1144 counter_u64_free(rack_tlp_retran); 1145 counter_u64_free(rack_tlp_retran_bytes); 1146 counter_u64_free(rack_tlp_retran_fail); 1147 counter_u64_free(rack_to_tot); 1148 counter_u64_free(rack_to_arm_rack); 1149 counter_u64_free(rack_to_arm_tlp); 1150 counter_u64_free(rack_paced_segments); 1151 counter_u64_free(rack_unpaced_segments); 1152 counter_u64_free(rack_saw_enobuf); 1153 counter_u64_free(rack_saw_enetunreach); 1154 counter_u64_free(rack_to_alloc_hard); 1155 counter_u64_free(rack_to_alloc_emerg); 1156 counter_u64_free(rack_sack_proc_all); 1157 counter_u64_free(rack_sack_proc_short); 1158 counter_u64_free(rack_sack_proc_restart); 1159 counter_u64_free(rack_to_alloc); 1160 counter_u64_free(rack_to_alloc_limited); 1161 counter_u64_free(rack_split_limited); 1162 counter_u64_free(rack_find_high); 1163 counter_u64_free(rack_runt_sacks); 1164 counter_u64_free(rack_enter_tlp_calc); 1165 counter_u64_free(rack_used_tlpmethod); 1166 counter_u64_free(rack_used_tlpmethod2); 1167 counter_u64_free(rack_progress_drops); 1168 counter_u64_free(rack_input_idle_reduces); 1169 counter_u64_free(rack_tlp_does_nada); 1170 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 1171 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 1172 } 1173 1174 static struct rack_sendmap * 1175 rack_alloc(struct tcp_rack *rack) 1176 { 1177 struct rack_sendmap *rsm; 1178 1179 rsm = uma_zalloc(rack_zone, M_NOWAIT); 1180 if (rsm) { 1181 rack->r_ctl.rc_num_maps_alloced++; 1182 counter_u64_add(rack_to_alloc, 1); 1183 return (rsm); 1184 } 1185 if (rack->rc_free_cnt) { 1186 counter_u64_add(rack_to_alloc_emerg, 1); 1187 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 1188 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next); 1189 rack->rc_free_cnt--; 1190 return (rsm); 1191 } 1192 return (NULL); 1193 } 1194 1195 static struct rack_sendmap * 1196 rack_alloc_full_limit(struct tcp_rack *rack) 1197 { 1198 if ((rack_map_entries_limit > 0) && 1199 (rack->r_ctl.rc_num_maps_alloced >= rack_map_entries_limit)) { 1200 counter_u64_add(rack_to_alloc_limited, 1); 1201 if (!rack->alloc_limit_reported) { 1202 rack->alloc_limit_reported = 1; 1203 counter_u64_add(rack_alloc_limited_conns, 1); 1204 } 1205 return (NULL); 1206 } 1207 return (rack_alloc(rack)); 1208 } 1209 1210 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 1211 static struct rack_sendmap * 1212 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 1213 { 1214 struct rack_sendmap *rsm; 1215 1216 if (limit_type) { 1217 /* currently there is only one limit type */ 1218 if (rack_map_split_limit > 0 && 1219 rack->r_ctl.rc_num_split_allocs >= rack_map_split_limit) { 1220 counter_u64_add(rack_split_limited, 1); 1221 if (!rack->alloc_limit_reported) { 1222 rack->alloc_limit_reported = 1; 1223 counter_u64_add(rack_alloc_limited_conns, 1); 1224 } 1225 return (NULL); 1226 } 1227 } 1228 1229 /* allocate and mark in the limit type, if set */ 1230 rsm = rack_alloc(rack); 1231 if (rsm != NULL && limit_type) { 1232 rsm->r_limit_type = limit_type; 1233 rack->r_ctl.rc_num_split_allocs++; 1234 } 1235 return (rsm); 1236 } 1237 1238 static void 1239 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 1240 { 1241 if (rsm->r_limit_type) { 1242 /* currently there is only one limit type */ 1243 rack->r_ctl.rc_num_split_allocs--; 1244 } 1245 if (rack->r_ctl.rc_tlpsend == rsm) 1246 rack->r_ctl.rc_tlpsend = NULL; 1247 if (rack->r_ctl.rc_next == rsm) 1248 rack->r_ctl.rc_next = NULL; 1249 if (rack->r_ctl.rc_sacklast == rsm) 1250 rack->r_ctl.rc_sacklast = NULL; 1251 if (rack->rc_free_cnt < rack_free_cache) { 1252 memset(rsm, 0, sizeof(struct rack_sendmap)); 1253 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next); 1254 rsm->r_limit_type = 0; 1255 rack->rc_free_cnt++; 1256 return; 1257 } 1258 rack->r_ctl.rc_num_maps_alloced--; 1259 uma_zfree(rack_zone, rsm); 1260 } 1261 1262 /* 1263 * CC wrapper hook functions 1264 */ 1265 static void 1266 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs, 1267 uint16_t type, int32_t recovery) 1268 { 1269 #ifdef NETFLIX_STATS 1270 int32_t gput; 1271 #endif 1272 1273 INP_WLOCK_ASSERT(tp->t_inpcb); 1274 1275 tp->ccv->nsegs = nsegs; 1276 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); 1277 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 1278 uint32_t max; 1279 1280 max = rack->r_ctl.rc_early_recovery_segs * tp->t_maxseg; 1281 if (tp->ccv->bytes_this_ack > max) { 1282 tp->ccv->bytes_this_ack = max; 1283 } 1284 } 1285 if (tp->snd_cwnd <= tp->snd_wnd) 1286 tp->ccv->flags |= CCF_CWND_LIMITED; 1287 else 1288 tp->ccv->flags &= ~CCF_CWND_LIMITED; 1289 1290 if (type == CC_ACK) { 1291 #ifdef NETFLIX_STATS 1292 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 1293 ((int32_t) tp->snd_cwnd) - tp->snd_wnd); 1294 if ((tp->t_flags & TF_GPUTINPROG) && 1295 SEQ_GEQ(th->th_ack, tp->gput_ack)) { 1296 gput = (((int64_t) (th->th_ack - tp->gput_seq)) << 3) / 1297 max(1, tcp_ts_getticks() - tp->gput_ts); 1298 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 1299 gput); 1300 /* 1301 * XXXLAS: This is a temporary hack, and should be 1302 * chained off VOI_TCP_GPUT when stats(9) grows an 1303 * API to deal with chained VOIs. 1304 */ 1305 if (tp->t_stats_gput_prev > 0) 1306 stats_voi_update_abs_s32(tp->t_stats, 1307 VOI_TCP_GPUT_ND, 1308 ((gput - tp->t_stats_gput_prev) * 100) / 1309 tp->t_stats_gput_prev); 1310 tp->t_flags &= ~TF_GPUTINPROG; 1311 tp->t_stats_gput_prev = gput; 1312 if (tp->t_maxpeakrate) { 1313 /* 1314 * We update t_peakrate_thr. This gives us roughly 1315 * one update per round trip time. 1316 */ 1317 tcp_update_peakrate_thr(tp); 1318 } 1319 } 1320 #endif 1321 if (tp->snd_cwnd > tp->snd_ssthresh) { 1322 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 1323 nsegs * V_tcp_abc_l_var * tp->t_maxseg); 1324 if (tp->t_bytes_acked >= tp->snd_cwnd) { 1325 tp->t_bytes_acked -= tp->snd_cwnd; 1326 tp->ccv->flags |= CCF_ABC_SENTAWND; 1327 } 1328 } else { 1329 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 1330 tp->t_bytes_acked = 0; 1331 } 1332 } 1333 if (CC_ALGO(tp)->ack_received != NULL) { 1334 /* XXXLAS: Find a way to live without this */ 1335 tp->ccv->curack = th->th_ack; 1336 CC_ALGO(tp)->ack_received(tp->ccv, type); 1337 } 1338 #ifdef NETFLIX_STATS 1339 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd); 1340 #endif 1341 if (rack->r_ctl.rc_rack_largest_cwnd < tp->snd_cwnd) { 1342 rack->r_ctl.rc_rack_largest_cwnd = tp->snd_cwnd; 1343 } 1344 /* we enforce max peak rate if it is set. */ 1345 if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr) { 1346 tp->snd_cwnd = tp->t_peakrate_thr; 1347 } 1348 } 1349 1350 static void 1351 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th) 1352 { 1353 struct tcp_rack *rack; 1354 1355 rack = (struct tcp_rack *)tp->t_fb_ptr; 1356 INP_WLOCK_ASSERT(tp->t_inpcb); 1357 if (rack->r_ctl.rc_prr_sndcnt > 0) 1358 rack->r_wanted_output++; 1359 } 1360 1361 static void 1362 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th) 1363 { 1364 struct tcp_rack *rack; 1365 1366 INP_WLOCK_ASSERT(tp->t_inpcb); 1367 rack = (struct tcp_rack *)tp->t_fb_ptr; 1368 if (CC_ALGO(tp)->post_recovery != NULL) { 1369 tp->ccv->curack = th->th_ack; 1370 CC_ALGO(tp)->post_recovery(tp->ccv); 1371 } 1372 /* 1373 * Here we can in theory adjust cwnd to be based on the number of 1374 * losses in the window (rack->r_ctl.rc_loss_count). This is done 1375 * based on the rack_use_proportional flag. 1376 */ 1377 if (rack->r_ctl.rc_prop_reduce && rack->r_ctl.rc_prop_rate) { 1378 int32_t reduce; 1379 1380 reduce = (rack->r_ctl.rc_loss_count * rack->r_ctl.rc_prop_rate); 1381 if (reduce > 50) { 1382 reduce = 50; 1383 } 1384 tp->snd_cwnd -= ((reduce * tp->snd_cwnd) / 100); 1385 } else { 1386 if (tp->snd_cwnd > tp->snd_ssthresh) { 1387 /* Drop us down to the ssthresh (1/2 cwnd at loss) */ 1388 tp->snd_cwnd = tp->snd_ssthresh; 1389 } 1390 } 1391 if (rack->r_ctl.rc_prr_sndcnt > 0) { 1392 /* Suck the next prr cnt back into cwnd */ 1393 tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt; 1394 rack->r_ctl.rc_prr_sndcnt = 0; 1395 } 1396 tp->snd_recover = tp->snd_una; 1397 EXIT_RECOVERY(tp->t_flags); 1398 } 1399 1400 static void 1401 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) 1402 { 1403 struct tcp_rack *rack; 1404 1405 INP_WLOCK_ASSERT(tp->t_inpcb); 1406 1407 rack = (struct tcp_rack *)tp->t_fb_ptr; 1408 switch (type) { 1409 case CC_NDUPACK: 1410 /* rack->r_ctl.rc_ssthresh_set = 1;*/ 1411 if (!IN_FASTRECOVERY(tp->t_flags)) { 1412 rack->r_ctl.rc_tlp_rtx_out = 0; 1413 rack->r_ctl.rc_prr_delivered = 0; 1414 rack->r_ctl.rc_prr_out = 0; 1415 rack->r_ctl.rc_loss_count = 0; 1416 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg; 1417 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 1418 tp->snd_recover = tp->snd_max; 1419 if (tp->t_flags & TF_ECN_PERMIT) 1420 tp->t_flags |= TF_ECN_SND_CWR; 1421 } 1422 break; 1423 case CC_ECN: 1424 if (!IN_CONGRECOVERY(tp->t_flags)) { 1425 TCPSTAT_INC(tcps_ecn_rcwnd); 1426 tp->snd_recover = tp->snd_max; 1427 if (tp->t_flags & TF_ECN_PERMIT) 1428 tp->t_flags |= TF_ECN_SND_CWR; 1429 } 1430 break; 1431 case CC_RTO: 1432 tp->t_dupacks = 0; 1433 tp->t_bytes_acked = 0; 1434 EXIT_RECOVERY(tp->t_flags); 1435 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 / 1436 tp->t_maxseg) * tp->t_maxseg; 1437 tp->snd_cwnd = tp->t_maxseg; 1438 break; 1439 case CC_RTO_ERR: 1440 TCPSTAT_INC(tcps_sndrexmitbad); 1441 /* RTO was unnecessary, so reset everything. */ 1442 tp->snd_cwnd = tp->snd_cwnd_prev; 1443 tp->snd_ssthresh = tp->snd_ssthresh_prev; 1444 tp->snd_recover = tp->snd_recover_prev; 1445 if (tp->t_flags & TF_WASFRECOVERY) 1446 ENTER_FASTRECOVERY(tp->t_flags); 1447 if (tp->t_flags & TF_WASCRECOVERY) 1448 ENTER_CONGRECOVERY(tp->t_flags); 1449 tp->snd_nxt = tp->snd_max; 1450 tp->t_badrxtwin = 0; 1451 break; 1452 } 1453 1454 if (CC_ALGO(tp)->cong_signal != NULL) { 1455 if (th != NULL) 1456 tp->ccv->curack = th->th_ack; 1457 CC_ALGO(tp)->cong_signal(tp->ccv, type); 1458 } 1459 } 1460 1461 1462 1463 static inline void 1464 rack_cc_after_idle(struct tcpcb *tp, int reduce_largest) 1465 { 1466 uint32_t i_cwnd; 1467 1468 INP_WLOCK_ASSERT(tp->t_inpcb); 1469 1470 #ifdef NETFLIX_STATS 1471 TCPSTAT_INC(tcps_idle_restarts); 1472 if (tp->t_state == TCPS_ESTABLISHED) 1473 TCPSTAT_INC(tcps_idle_estrestarts); 1474 #endif 1475 if (CC_ALGO(tp)->after_idle != NULL) 1476 CC_ALGO(tp)->after_idle(tp->ccv); 1477 1478 if (V_tcp_initcwnd_segments) 1479 i_cwnd = min((V_tcp_initcwnd_segments * tp->t_maxseg), 1480 max(2 * tp->t_maxseg, 14600)); 1481 else if (V_tcp_do_rfc3390) 1482 i_cwnd = min(4 * tp->t_maxseg, 1483 max(2 * tp->t_maxseg, 4380)); 1484 else { 1485 /* Per RFC5681 Section 3.1 */ 1486 if (tp->t_maxseg > 2190) 1487 i_cwnd = 2 * tp->t_maxseg; 1488 else if (tp->t_maxseg > 1095) 1489 i_cwnd = 3 * tp->t_maxseg; 1490 else 1491 i_cwnd = 4 * tp->t_maxseg; 1492 } 1493 if (reduce_largest) { 1494 /* 1495 * Do we reduce the largest cwnd to make 1496 * rack play nice on restart hptsi wise? 1497 */ 1498 if (((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd > i_cwnd) 1499 ((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rack_largest_cwnd = i_cwnd; 1500 } 1501 /* 1502 * Being idle is no differnt than the initial window. If the cc 1503 * clamps it down below the initial window raise it to the initial 1504 * window. 1505 */ 1506 if (tp->snd_cwnd < i_cwnd) { 1507 tp->snd_cwnd = i_cwnd; 1508 } 1509 } 1510 1511 1512 /* 1513 * Indicate whether this ack should be delayed. We can delay the ack if 1514 * following conditions are met: 1515 * - There is no delayed ack timer in progress. 1516 * - Our last ack wasn't a 0-sized window. We never want to delay 1517 * the ack that opens up a 0-sized window. 1518 * - LRO wasn't used for this segment. We make sure by checking that the 1519 * segment size is not larger than the MSS. 1520 * - Delayed acks are enabled or this is a half-synchronized T/TCP 1521 * connection. 1522 */ 1523 #define DELAY_ACK(tp, tlen) \ 1524 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 1525 ((tp->t_flags & TF_DELACK) == 0) && \ 1526 (tlen <= tp->t_maxseg) && \ 1527 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 1528 1529 static inline void 1530 rack_calc_rwin(struct socket *so, struct tcpcb *tp) 1531 { 1532 int32_t win; 1533 1534 /* 1535 * Calculate amount of space in receive window, and then do TCP 1536 * input processing. Receive window is amount of space in rcv queue, 1537 * but not less than advertised window. 1538 */ 1539 win = sbspace(&so->so_rcv); 1540 if (win < 0) 1541 win = 0; 1542 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1543 } 1544 1545 static void 1546 rack_do_drop(struct mbuf *m, struct tcpcb *tp) 1547 { 1548 /* 1549 * Drop space held by incoming segment and return. 1550 */ 1551 if (tp != NULL) 1552 INP_WUNLOCK(tp->t_inpcb); 1553 if (m) 1554 m_freem(m); 1555 } 1556 1557 static void 1558 rack_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t rstreason, int32_t tlen) 1559 { 1560 if (tp != NULL) { 1561 tcp_dropwithreset(m, th, tp, tlen, rstreason); 1562 INP_WUNLOCK(tp->t_inpcb); 1563 } else 1564 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 1565 } 1566 1567 /* 1568 * The value in ret_val informs the caller 1569 * if we dropped the tcb (and lock) or not. 1570 * 1 = we dropped it, 0 = the TCB is still locked 1571 * and valid. 1572 */ 1573 static void 1574 rack_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t thflags, int32_t tlen, int32_t * ret_val) 1575 { 1576 /* 1577 * Generate an ACK dropping incoming segment if it occupies sequence 1578 * space, where the ACK reflects our state. 1579 * 1580 * We can now skip the test for the RST flag since all paths to this 1581 * code happen after packets containing RST have been dropped. 1582 * 1583 * In the SYN-RECEIVED state, don't send an ACK unless the segment 1584 * we received passes the SYN-RECEIVED ACK test. If it fails send a 1585 * RST. This breaks the loop in the "LAND" DoS attack, and also 1586 * prevents an ACK storm between two listening ports that have been 1587 * sent forged SYN segments, each with the source address of the 1588 * other. 1589 */ 1590 struct tcp_rack *rack; 1591 1592 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 1593 (SEQ_GT(tp->snd_una, th->th_ack) || 1594 SEQ_GT(th->th_ack, tp->snd_max))) { 1595 *ret_val = 1; 1596 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 1597 return; 1598 } else 1599 *ret_val = 0; 1600 rack = (struct tcp_rack *)tp->t_fb_ptr; 1601 rack->r_wanted_output++; 1602 tp->t_flags |= TF_ACKNOW; 1603 if (m) 1604 m_freem(m); 1605 } 1606 1607 1608 static int 1609 rack_process_rst(struct mbuf *m, struct tcphdr *th, struct socket *so, struct tcpcb *tp) 1610 { 1611 /* 1612 * RFC5961 Section 3.2 1613 * 1614 * - RST drops connection only if SEG.SEQ == RCV.NXT. - If RST is in 1615 * window, we send challenge ACK. 1616 * 1617 * Note: to take into account delayed ACKs, we should test against 1618 * last_ack_sent instead of rcv_nxt. Note 2: we handle special case 1619 * of closed window, not covered by the RFC. 1620 */ 1621 int dropped = 0; 1622 1623 if ((SEQ_GEQ(th->th_seq, (tp->last_ack_sent - 1)) && 1624 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 1625 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 1626 1627 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1628 KASSERT(tp->t_state != TCPS_SYN_SENT, 1629 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 1630 __func__, th, tp)); 1631 1632 if (V_tcp_insecure_rst || 1633 (tp->last_ack_sent == th->th_seq) || 1634 (tp->rcv_nxt == th->th_seq) || 1635 ((tp->last_ack_sent - 1) == th->th_seq)) { 1636 TCPSTAT_INC(tcps_drops); 1637 /* Drop the connection. */ 1638 switch (tp->t_state) { 1639 case TCPS_SYN_RECEIVED: 1640 so->so_error = ECONNREFUSED; 1641 goto close; 1642 case TCPS_ESTABLISHED: 1643 case TCPS_FIN_WAIT_1: 1644 case TCPS_FIN_WAIT_2: 1645 case TCPS_CLOSE_WAIT: 1646 case TCPS_CLOSING: 1647 case TCPS_LAST_ACK: 1648 so->so_error = ECONNRESET; 1649 close: 1650 tcp_state_change(tp, TCPS_CLOSED); 1651 /* FALLTHROUGH */ 1652 default: 1653 tp = tcp_close(tp); 1654 } 1655 dropped = 1; 1656 rack_do_drop(m, tp); 1657 } else { 1658 TCPSTAT_INC(tcps_badrst); 1659 /* Send challenge ACK. */ 1660 tcp_respond(tp, mtod(m, void *), th, m, 1661 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 1662 tp->last_ack_sent = tp->rcv_nxt; 1663 } 1664 } else { 1665 m_freem(m); 1666 } 1667 return (dropped); 1668 } 1669 1670 /* 1671 * The value in ret_val informs the caller 1672 * if we dropped the tcb (and lock) or not. 1673 * 1 = we dropped it, 0 = the TCB is still locked 1674 * and valid. 1675 */ 1676 static void 1677 rack_challenge_ack(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * ret_val) 1678 { 1679 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1680 1681 TCPSTAT_INC(tcps_badsyn); 1682 if (V_tcp_insecure_syn && 1683 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 1684 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1685 tp = tcp_drop(tp, ECONNRESET); 1686 *ret_val = 1; 1687 rack_do_drop(m, tp); 1688 } else { 1689 /* Send challenge ACK. */ 1690 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 1691 tp->snd_nxt, TH_ACK); 1692 tp->last_ack_sent = tp->rcv_nxt; 1693 m = NULL; 1694 *ret_val = 0; 1695 rack_do_drop(m, NULL); 1696 } 1697 } 1698 1699 /* 1700 * rack_ts_check returns 1 for you should not proceed. It places 1701 * in ret_val what should be returned 1/0 by the caller. The 1 indicates 1702 * that the TCB is unlocked and probably dropped. The 0 indicates the 1703 * TCB is still valid and locked. 1704 */ 1705 static int 1706 rack_ts_check(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t tlen, int32_t thflags, int32_t * ret_val) 1707 { 1708 1709 /* Check to see if ts_recent is over 24 days old. */ 1710 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 1711 /* 1712 * Invalidate ts_recent. If this segment updates ts_recent, 1713 * the age will be reset later and ts_recent will get a 1714 * valid value. If it does not, setting ts_recent to zero 1715 * will at least satisfy the requirement that zero be placed 1716 * in the timestamp echo reply when ts_recent isn't valid. 1717 * The age isn't reset until we get a valid ts_recent 1718 * because we don't want out-of-order segments to be dropped 1719 * when ts_recent is old. 1720 */ 1721 tp->ts_recent = 0; 1722 } else { 1723 TCPSTAT_INC(tcps_rcvduppack); 1724 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 1725 TCPSTAT_INC(tcps_pawsdrop); 1726 *ret_val = 0; 1727 if (tlen) { 1728 rack_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 1729 } else { 1730 rack_do_drop(m, NULL); 1731 } 1732 return (1); 1733 } 1734 return (0); 1735 } 1736 1737 /* 1738 * rack_drop_checks returns 1 for you should not proceed. It places 1739 * in ret_val what should be returned 1/0 by the caller. The 1 indicates 1740 * that the TCB is unlocked and probably dropped. The 0 indicates the 1741 * TCB is still valid and locked. 1742 */ 1743 static int 1744 rack_drop_checks(struct tcpopt *to, struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int32_t * tlenp, int32_t * thf, int32_t * drop_hdrlen, int32_t * ret_val) 1745 { 1746 int32_t todrop; 1747 int32_t thflags; 1748 int32_t tlen; 1749 1750 thflags = *thf; 1751 tlen = *tlenp; 1752 todrop = tp->rcv_nxt - th->th_seq; 1753 if (todrop > 0) { 1754 if (thflags & TH_SYN) { 1755 thflags &= ~TH_SYN; 1756 th->th_seq++; 1757 if (th->th_urp > 1) 1758 th->th_urp--; 1759 else 1760 thflags &= ~TH_URG; 1761 todrop--; 1762 } 1763 /* 1764 * Following if statement from Stevens, vol. 2, p. 960. 1765 */ 1766 if (todrop > tlen 1767 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 1768 /* 1769 * Any valid FIN must be to the left of the window. 1770 * At this point the FIN must be a duplicate or out 1771 * of sequence; drop it. 1772 */ 1773 thflags &= ~TH_FIN; 1774 /* 1775 * Send an ACK to resynchronize and drop any data. 1776 * But keep on processing for RST or ACK. 1777 */ 1778 tp->t_flags |= TF_ACKNOW; 1779 todrop = tlen; 1780 TCPSTAT_INC(tcps_rcvduppack); 1781 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 1782 } else { 1783 TCPSTAT_INC(tcps_rcvpartduppack); 1784 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 1785 } 1786 if (tp->t_flags & TF_SACK_PERMIT) { 1787 /* 1788 * record the left, to-be-dropped edge of data 1789 * here, for use as dsack block further down 1790 */ 1791 tcp_update_sack_list(tp, th->th_seq, 1792 th->th_seq + todrop); 1793 /* 1794 * ACK now, as the next in-sequence segment 1795 * will clear the DSACK block again 1796 */ 1797 tp->t_flags |= TF_ACKNOW; 1798 } 1799 *drop_hdrlen += todrop; /* drop from the top afterwards */ 1800 th->th_seq += todrop; 1801 tlen -= todrop; 1802 if (th->th_urp > todrop) 1803 th->th_urp -= todrop; 1804 else { 1805 thflags &= ~TH_URG; 1806 th->th_urp = 0; 1807 } 1808 } 1809 /* 1810 * If segment ends after window, drop trailing data (and PUSH and 1811 * FIN); if nothing left, just ACK. 1812 */ 1813 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1814 if (todrop > 0) { 1815 TCPSTAT_INC(tcps_rcvpackafterwin); 1816 if (todrop >= tlen) { 1817 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 1818 /* 1819 * If window is closed can only take segments at 1820 * window edge, and have to drop data and PUSH from 1821 * incoming segments. Continue processing, but 1822 * remember to ack. Otherwise, drop segment and 1823 * ack. 1824 */ 1825 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1826 tp->t_flags |= TF_ACKNOW; 1827 TCPSTAT_INC(tcps_rcvwinprobe); 1828 } else { 1829 rack_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 1830 return (1); 1831 } 1832 } else 1833 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 1834 m_adj(m, -todrop); 1835 tlen -= todrop; 1836 thflags &= ~(TH_PUSH | TH_FIN); 1837 } 1838 *thf = thflags; 1839 *tlenp = tlen; 1840 return (0); 1841 } 1842 1843 static struct rack_sendmap * 1844 rack_find_lowest_rsm(struct tcp_rack *rack) 1845 { 1846 struct rack_sendmap *rsm; 1847 1848 /* 1849 * Walk the time-order transmitted list looking for an rsm that is 1850 * not acked. This will be the one that was sent the longest time 1851 * ago that is still outstanding. 1852 */ 1853 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 1854 if (rsm->r_flags & RACK_ACKED) { 1855 continue; 1856 } 1857 goto finish; 1858 } 1859 finish: 1860 return (rsm); 1861 } 1862 1863 static struct rack_sendmap * 1864 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 1865 { 1866 struct rack_sendmap *prsm; 1867 1868 /* 1869 * Walk the sequence order list backward until we hit and arrive at 1870 * the highest seq not acked. In theory when this is called it 1871 * should be the last segment (which it was not). 1872 */ 1873 counter_u64_add(rack_find_high, 1); 1874 prsm = rsm; 1875 TAILQ_FOREACH_REVERSE_FROM(prsm, &rack->r_ctl.rc_map, rack_head, r_next) { 1876 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 1877 continue; 1878 } 1879 return (prsm); 1880 } 1881 return (NULL); 1882 } 1883 1884 1885 static uint32_t 1886 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 1887 { 1888 int32_t lro; 1889 uint32_t thresh; 1890 1891 /* 1892 * lro is the flag we use to determine if we have seen reordering. 1893 * If it gets set we have seen reordering. The reorder logic either 1894 * works in one of two ways: 1895 * 1896 * If reorder-fade is configured, then we track the last time we saw 1897 * re-ordering occur. If we reach the point where enough time as 1898 * passed we no longer consider reordering has occuring. 1899 * 1900 * Or if reorder-face is 0, then once we see reordering we consider 1901 * the connection to alway be subject to reordering and just set lro 1902 * to 1. 1903 * 1904 * In the end if lro is non-zero we add the extra time for 1905 * reordering in. 1906 */ 1907 if (srtt == 0) 1908 srtt = 1; 1909 if (rack->r_ctl.rc_reorder_ts) { 1910 if (rack->r_ctl.rc_reorder_fade) { 1911 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 1912 lro = cts - rack->r_ctl.rc_reorder_ts; 1913 if (lro == 0) { 1914 /* 1915 * No time as passed since the last 1916 * reorder, mark it as reordering. 1917 */ 1918 lro = 1; 1919 } 1920 } else { 1921 /* Negative time? */ 1922 lro = 0; 1923 } 1924 if (lro > rack->r_ctl.rc_reorder_fade) { 1925 /* Turn off reordering seen too */ 1926 rack->r_ctl.rc_reorder_ts = 0; 1927 lro = 0; 1928 } 1929 } else { 1930 /* Reodering does not fade */ 1931 lro = 1; 1932 } 1933 } else { 1934 lro = 0; 1935 } 1936 thresh = srtt + rack->r_ctl.rc_pkt_delay; 1937 if (lro) { 1938 /* It must be set, if not you get 1/4 rtt */ 1939 if (rack->r_ctl.rc_reorder_shift) 1940 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 1941 else 1942 thresh += (srtt >> 2); 1943 } else { 1944 thresh += 1; 1945 } 1946 /* We don't let the rack timeout be above a RTO */ 1947 1948 if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) { 1949 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur); 1950 } 1951 /* And we don't want it above the RTO max either */ 1952 if (thresh > rack_rto_max) { 1953 thresh = rack_rto_max; 1954 } 1955 return (thresh); 1956 } 1957 1958 static uint32_t 1959 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 1960 struct rack_sendmap *rsm, uint32_t srtt) 1961 { 1962 struct rack_sendmap *prsm; 1963 uint32_t thresh, len; 1964 int maxseg; 1965 1966 if (srtt == 0) 1967 srtt = 1; 1968 if (rack->r_ctl.rc_tlp_threshold) 1969 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 1970 else 1971 thresh = (srtt * 2); 1972 1973 /* Get the previous sent packet, if any */ 1974 maxseg = tcp_maxseg(tp); 1975 counter_u64_add(rack_enter_tlp_calc, 1); 1976 len = rsm->r_end - rsm->r_start; 1977 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 1978 /* Exactly like the ID */ 1979 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= maxseg) { 1980 uint32_t alt_thresh; 1981 /* 1982 * Compensate for delayed-ack with the d-ack time. 1983 */ 1984 counter_u64_add(rack_used_tlpmethod, 1); 1985 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 1986 if (alt_thresh > thresh) 1987 thresh = alt_thresh; 1988 } 1989 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 1990 /* 2.1 behavior */ 1991 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 1992 if (prsm && (len <= maxseg)) { 1993 /* 1994 * Two packets outstanding, thresh should be (2*srtt) + 1995 * possible inter-packet delay (if any). 1996 */ 1997 uint32_t inter_gap = 0; 1998 int idx, nidx; 1999 2000 counter_u64_add(rack_used_tlpmethod, 1); 2001 idx = rsm->r_rtr_cnt - 1; 2002 nidx = prsm->r_rtr_cnt - 1; 2003 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) { 2004 /* Yes it was sent later (or at the same time) */ 2005 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 2006 } 2007 thresh += inter_gap; 2008 } else if (len <= maxseg) { 2009 /* 2010 * Possibly compensate for delayed-ack. 2011 */ 2012 uint32_t alt_thresh; 2013 2014 counter_u64_add(rack_used_tlpmethod2, 1); 2015 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2016 if (alt_thresh > thresh) 2017 thresh = alt_thresh; 2018 } 2019 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 2020 /* 2.2 behavior */ 2021 if (len <= maxseg) { 2022 uint32_t alt_thresh; 2023 /* 2024 * Compensate for delayed-ack with the d-ack time. 2025 */ 2026 counter_u64_add(rack_used_tlpmethod, 1); 2027 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2028 if (alt_thresh > thresh) 2029 thresh = alt_thresh; 2030 } 2031 } 2032 /* Not above an RTO */ 2033 if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) { 2034 thresh = TICKS_2_MSEC(tp->t_rxtcur); 2035 } 2036 /* Not above a RTO max */ 2037 if (thresh > rack_rto_max) { 2038 thresh = rack_rto_max; 2039 } 2040 /* Apply user supplied min TLP */ 2041 if (thresh < rack_tlp_min) { 2042 thresh = rack_tlp_min; 2043 } 2044 return (thresh); 2045 } 2046 2047 static struct rack_sendmap * 2048 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 2049 { 2050 /* 2051 * Check to see that we don't need to fall into recovery. We will 2052 * need to do so if our oldest transmit is past the time we should 2053 * have had an ack. 2054 */ 2055 struct tcp_rack *rack; 2056 struct rack_sendmap *rsm; 2057 int32_t idx; 2058 uint32_t srtt_cur, srtt, thresh; 2059 2060 rack = (struct tcp_rack *)tp->t_fb_ptr; 2061 if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) { 2062 return (NULL); 2063 } 2064 srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT; 2065 srtt = TICKS_2_MSEC(srtt_cur); 2066 if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt)) 2067 srtt = rack->rc_rack_rtt; 2068 2069 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2070 if (rsm == NULL) 2071 return (NULL); 2072 2073 if (rsm->r_flags & RACK_ACKED) { 2074 rsm = rack_find_lowest_rsm(rack); 2075 if (rsm == NULL) 2076 return (NULL); 2077 } 2078 idx = rsm->r_rtr_cnt - 1; 2079 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 2080 if (tsused < rsm->r_tim_lastsent[idx]) { 2081 return (NULL); 2082 } 2083 if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) { 2084 return (NULL); 2085 } 2086 /* Ok if we reach here we are over-due */ 2087 rack->r_ctl.rc_rsm_start = rsm->r_start; 2088 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 2089 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 2090 rack_cong_signal(tp, NULL, CC_NDUPACK); 2091 return (rsm); 2092 } 2093 2094 static uint32_t 2095 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 2096 { 2097 int32_t t; 2098 int32_t tt; 2099 uint32_t ret_val; 2100 2101 t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT)); 2102 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 2103 tcp_persmin, tcp_persmax); 2104 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 2105 tp->t_rxtshift++; 2106 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 2107 ret_val = (uint32_t)tt; 2108 return (ret_val); 2109 } 2110 2111 static uint32_t 2112 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2113 { 2114 /* 2115 * Start the FR timer, we do this based on getting the first one in 2116 * the rc_tmap. Note that if its NULL we must stop the timer. in all 2117 * events we need to stop the running timer (if its running) before 2118 * starting the new one. 2119 */ 2120 uint32_t thresh, exp, to, srtt, time_since_sent; 2121 uint32_t srtt_cur; 2122 int32_t idx; 2123 int32_t is_tlp_timer = 0; 2124 struct rack_sendmap *rsm; 2125 2126 if (rack->t_timers_stopped) { 2127 /* All timers have been stopped none are to run */ 2128 return (0); 2129 } 2130 if (rack->rc_in_persist) { 2131 /* We can't start any timer in persists */ 2132 return (rack_get_persists_timer_val(tp, rack)); 2133 } 2134 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2135 if (rsm == NULL) { 2136 /* Nothing on the send map */ 2137 activate_rxt: 2138 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 2139 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 2140 to = TICKS_2_MSEC(tp->t_rxtcur); 2141 if (to == 0) 2142 to = 1; 2143 return (to); 2144 } 2145 return (0); 2146 } 2147 if (rsm->r_flags & RACK_ACKED) { 2148 rsm = rack_find_lowest_rsm(rack); 2149 if (rsm == NULL) { 2150 /* No lowest? */ 2151 goto activate_rxt; 2152 } 2153 } 2154 /* Convert from ms to usecs */ 2155 if (rsm->r_flags & RACK_SACK_PASSED) { 2156 if ((tp->t_flags & TF_SENTFIN) && 2157 ((tp->snd_max - tp->snd_una) == 1) && 2158 (rsm->r_flags & RACK_HAS_FIN)) { 2159 /* 2160 * We don't start a rack timer if all we have is a 2161 * FIN outstanding. 2162 */ 2163 goto activate_rxt; 2164 } 2165 if (tp->t_srtt) { 2166 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT); 2167 srtt = TICKS_2_MSEC(srtt_cur); 2168 } else 2169 srtt = RACK_INITIAL_RTO; 2170 2171 thresh = rack_calc_thresh_rack(rack, srtt, cts); 2172 idx = rsm->r_rtr_cnt - 1; 2173 exp = rsm->r_tim_lastsent[idx] + thresh; 2174 if (SEQ_GEQ(exp, cts)) { 2175 to = exp - cts; 2176 if (to < rack->r_ctl.rc_min_to) { 2177 to = rack->r_ctl.rc_min_to; 2178 } 2179 } else { 2180 to = rack->r_ctl.rc_min_to; 2181 } 2182 } else { 2183 /* Ok we need to do a TLP not RACK */ 2184 if ((rack->rc_tlp_in_progress != 0) || 2185 (rack->r_ctl.rc_tlp_rtx_out != 0)) { 2186 /* 2187 * The previous send was a TLP or a tlp_rtx is in 2188 * process. 2189 */ 2190 goto activate_rxt; 2191 } 2192 if ((tp->snd_max - tp->snd_una) > tp->snd_wnd) { 2193 /* 2194 * Peer collapsed rwnd, don't do TLP. 2195 */ 2196 goto activate_rxt; 2197 } 2198 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 2199 if (rsm == NULL) { 2200 /* We found no rsm to TLP with. */ 2201 goto activate_rxt; 2202 } 2203 if (rsm->r_flags & RACK_HAS_FIN) { 2204 /* If its a FIN we dont do TLP */ 2205 rsm = NULL; 2206 goto activate_rxt; 2207 } 2208 idx = rsm->r_rtr_cnt - 1; 2209 if (TSTMP_GT(cts, rsm->r_tim_lastsent[idx])) 2210 time_since_sent = cts - rsm->r_tim_lastsent[idx]; 2211 else 2212 time_since_sent = 0; 2213 is_tlp_timer = 1; 2214 if (tp->t_srtt) { 2215 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT); 2216 srtt = TICKS_2_MSEC(srtt_cur); 2217 } else 2218 srtt = RACK_INITIAL_RTO; 2219 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 2220 if (thresh > time_since_sent) 2221 to = thresh - time_since_sent; 2222 else 2223 to = rack->r_ctl.rc_min_to; 2224 if (to > TCPTV_REXMTMAX) { 2225 /* 2226 * If the TLP time works out to larger than the max 2227 * RTO lets not do TLP.. just RTO. 2228 */ 2229 goto activate_rxt; 2230 } 2231 if (rsm->r_start != rack->r_ctl.rc_last_tlp_seq) { 2232 /* 2233 * The tail is no longer the last one I did a probe 2234 * on 2235 */ 2236 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 2237 rack->r_ctl.rc_last_tlp_seq = rsm->r_start; 2238 } 2239 } 2240 if (is_tlp_timer == 0) { 2241 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 2242 } else { 2243 if ((rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) || 2244 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) { 2245 /* 2246 * We have exceeded how many times we can retran the 2247 * current TLP timer, switch to the RTO timer. 2248 */ 2249 goto activate_rxt; 2250 } else { 2251 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 2252 } 2253 } 2254 if (to == 0) 2255 to = 1; 2256 return (to); 2257 } 2258 2259 static void 2260 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2261 { 2262 if (rack->rc_in_persist == 0) { 2263 if (((tp->t_flags & TF_SENTFIN) == 0) && 2264 (tp->snd_max - tp->snd_una) >= sbavail(&rack->rc_inp->inp_socket->so_snd)) 2265 /* Must need to send more data to enter persist */ 2266 return; 2267 rack->r_ctl.rc_went_idle_time = cts; 2268 rack_timer_cancel(tp, rack, cts, __LINE__); 2269 tp->t_rxtshift = 0; 2270 rack->rc_in_persist = 1; 2271 } 2272 } 2273 2274 static void 2275 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack) 2276 { 2277 if (rack->rc_inp->inp_in_hpts) { 2278 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 2279 rack->r_ctl.rc_hpts_flags = 0; 2280 } 2281 rack->rc_in_persist = 0; 2282 rack->r_ctl.rc_went_idle_time = 0; 2283 tp->t_flags &= ~TF_FORCEDATA; 2284 tp->t_rxtshift = 0; 2285 } 2286 2287 static void 2288 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, int32_t line, 2289 int32_t slot, uint32_t tot_len_this_send, int32_t frm_out_sbavail) 2290 { 2291 struct inpcb *inp; 2292 uint32_t delayed_ack = 0; 2293 uint32_t hpts_timeout; 2294 uint8_t stopped; 2295 uint32_t left = 0; 2296 2297 inp = tp->t_inpcb; 2298 if (inp->inp_in_hpts) { 2299 /* A previous call is already set up */ 2300 return; 2301 } 2302 2303 if ((tp->t_state == TCPS_CLOSED) || 2304 (tp->t_state == TCPS_LISTEN)) { 2305 return; 2306 } 2307 stopped = rack->rc_tmr_stopped; 2308 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 2309 left = rack->r_ctl.rc_timer_exp - cts; 2310 } 2311 rack->r_ctl.rc_timer_exp = 0; 2312 if (rack->rc_inp->inp_in_hpts == 0) { 2313 rack->r_ctl.rc_hpts_flags = 0; 2314 } 2315 if (slot) { 2316 /* We are hptsi too */ 2317 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 2318 } else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 2319 /* 2320 * We are still left on the hpts when the to goes 2321 * it will be for output. 2322 */ 2323 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) 2324 slot = rack->r_ctl.rc_last_output_to - cts; 2325 else 2326 slot = 1; 2327 } 2328 if ((tp->snd_wnd == 0) && TCPS_HAVEESTABLISHED(tp->t_state)) { 2329 /* No send window.. we must enter persist */ 2330 rack_enter_persist(tp, rack, cts); 2331 } else if ((frm_out_sbavail && 2332 (frm_out_sbavail > (tp->snd_max - tp->snd_una)) && 2333 (tp->snd_wnd < tp->t_maxseg)) && 2334 TCPS_HAVEESTABLISHED(tp->t_state)) { 2335 /* 2336 * If we have no window or we can't send a segment (and have 2337 * data to send.. we cheat here and frm_out_sbavail is 2338 * passed in with the sbavail(sb) only from bbr_output) and 2339 * we are established, then we must enter persits (if not 2340 * already in persits). 2341 */ 2342 rack_enter_persist(tp, rack, cts); 2343 } 2344 hpts_timeout = rack_timer_start(tp, rack, cts); 2345 if (tp->t_flags & TF_DELACK) { 2346 delayed_ack = TICKS_2_MSEC(tcp_delacktime); 2347 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 2348 } 2349 if (delayed_ack && ((hpts_timeout == 0) || 2350 (delayed_ack < hpts_timeout))) 2351 hpts_timeout = delayed_ack; 2352 else 2353 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 2354 /* 2355 * If no timers are going to run and we will fall off the hptsi 2356 * wheel, we resort to a keep-alive timer if its configured. 2357 */ 2358 if ((hpts_timeout == 0) && 2359 (slot == 0)) { 2360 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 2361 (tp->t_state <= TCPS_CLOSING)) { 2362 /* 2363 * Ok we have no timer (persists, rack, tlp, rxt or 2364 * del-ack), we don't have segments being paced. So 2365 * all that is left is the keepalive timer. 2366 */ 2367 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 2368 /* Get the established keep-alive time */ 2369 hpts_timeout = TP_KEEPIDLE(tp); 2370 } else { 2371 /* Get the initial setup keep-alive time */ 2372 hpts_timeout = TP_KEEPINIT(tp); 2373 } 2374 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 2375 } 2376 } 2377 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 2378 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 2379 /* 2380 * RACK, TLP, persists and RXT timers all are restartable 2381 * based on actions input .. i.e we received a packet (ack 2382 * or sack) and that changes things (rw, or snd_una etc). 2383 * Thus we can restart them with a new value. For 2384 * keep-alive, delayed_ack we keep track of what was left 2385 * and restart the timer with a smaller value. 2386 */ 2387 if (left < hpts_timeout) 2388 hpts_timeout = left; 2389 } 2390 if (hpts_timeout) { 2391 /* 2392 * Hack alert for now we can't time-out over 2,147,483 2393 * seconds (a bit more than 596 hours), which is probably ok 2394 * :). 2395 */ 2396 if (hpts_timeout > 0x7ffffffe) 2397 hpts_timeout = 0x7ffffffe; 2398 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 2399 } 2400 if (slot) { 2401 rack->r_ctl.rc_last_output_to = cts + slot; 2402 if ((hpts_timeout == 0) || (hpts_timeout > slot)) { 2403 if (rack->rc_inp->inp_in_hpts == 0) 2404 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(slot)); 2405 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 2406 } else { 2407 /* 2408 * Arrange for the hpts to kick back in after the 2409 * t-o if the t-o does not cause a send. 2410 */ 2411 if (rack->rc_inp->inp_in_hpts == 0) 2412 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout)); 2413 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 2414 } 2415 } else if (hpts_timeout) { 2416 if (rack->rc_inp->inp_in_hpts == 0) 2417 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout)); 2418 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 2419 } else { 2420 /* No timer starting */ 2421 #ifdef INVARIANTS 2422 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 2423 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 2424 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 2425 } 2426 #endif 2427 } 2428 rack->rc_tmr_stopped = 0; 2429 if (slot) 2430 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, cts); 2431 } 2432 2433 /* 2434 * RACK Timer, here we simply do logging and house keeping. 2435 * the normal rack_output() function will call the 2436 * appropriate thing to check if we need to do a RACK retransmit. 2437 * We return 1, saying don't proceed with rack_output only 2438 * when all timers have been stopped (destroyed PCB?). 2439 */ 2440 static int 2441 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2442 { 2443 /* 2444 * This timer simply provides an internal trigger to send out data. 2445 * The check_recovery_mode call will see if there are needed 2446 * retransmissions, if so we will enter fast-recovery. The output 2447 * call may or may not do the same thing depending on sysctl 2448 * settings. 2449 */ 2450 struct rack_sendmap *rsm; 2451 int32_t recovery; 2452 2453 if (tp->t_timers->tt_flags & TT_STOPPED) { 2454 return (1); 2455 } 2456 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 2457 /* Its not time yet */ 2458 return (0); 2459 } 2460 rack_log_to_event(rack, RACK_TO_FRM_RACK); 2461 recovery = IN_RECOVERY(tp->t_flags); 2462 counter_u64_add(rack_to_tot, 1); 2463 if (rack->r_state && (rack->r_state != tp->t_state)) 2464 rack_set_state(tp, rack); 2465 rsm = rack_check_recovery_mode(tp, cts); 2466 if (rsm) { 2467 uint32_t rtt; 2468 2469 rtt = rack->rc_rack_rtt; 2470 if (rtt == 0) 2471 rtt = 1; 2472 if ((recovery == 0) && 2473 (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg)) { 2474 /* 2475 * The rack-timeout that enter's us into recovery 2476 * will force out one MSS and set us up so that we 2477 * can do one more send in 2*rtt (transitioning the 2478 * rack timeout into a rack-tlp). 2479 */ 2480 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg; 2481 } else if ((rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg) && 2482 ((rsm->r_end - rsm->r_start) > rack->r_ctl.rc_prr_sndcnt)) { 2483 /* 2484 * When a rack timer goes, we have to send at 2485 * least one segment. They will be paced a min of 1ms 2486 * apart via the next rack timer (or further 2487 * if the rack timer dictates it). 2488 */ 2489 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg; 2490 } 2491 } else { 2492 /* This is a case that should happen rarely if ever */ 2493 counter_u64_add(rack_tlp_does_nada, 1); 2494 #ifdef TCP_BLACKBOX 2495 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2496 #endif 2497 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2498 } 2499 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 2500 return (0); 2501 } 2502 2503 static struct rack_sendmap * 2504 rack_merge_rsm(struct tcp_rack *rack, 2505 struct rack_sendmap *l_rsm, 2506 struct rack_sendmap *r_rsm) 2507 { 2508 /* 2509 * We are merging two ack'd RSM's, 2510 * the l_rsm is on the left (lower seq 2511 * values) and the r_rsm is on the right 2512 * (higher seq value). The simplest way 2513 * to merge these is to move the right 2514 * one into the left. I don't think there 2515 * is any reason we need to try to find 2516 * the oldest (or last oldest retransmitted). 2517 */ 2518 l_rsm->r_end = r_rsm->r_end; 2519 if (r_rsm->r_rtr_bytes) 2520 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 2521 if (r_rsm->r_in_tmap) { 2522 /* This really should not happen */ 2523 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 2524 } 2525 /* Now the flags */ 2526 if (r_rsm->r_flags & RACK_HAS_FIN) 2527 l_rsm->r_flags |= RACK_HAS_FIN; 2528 if (r_rsm->r_flags & RACK_TLP) 2529 l_rsm->r_flags |= RACK_TLP; 2530 TAILQ_REMOVE(&rack->r_ctl.rc_map, r_rsm, r_next); 2531 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 2532 /* Transfer the split limit to the map we free */ 2533 r_rsm->r_limit_type = l_rsm->r_limit_type; 2534 l_rsm->r_limit_type = 0; 2535 } 2536 rack_free(rack, r_rsm); 2537 return(l_rsm); 2538 } 2539 2540 /* 2541 * TLP Timer, here we simply setup what segment we want to 2542 * have the TLP expire on, the normal rack_output() will then 2543 * send it out. 2544 * 2545 * We return 1, saying don't proceed with rack_output only 2546 * when all timers have been stopped (destroyed PCB?). 2547 */ 2548 static int 2549 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2550 { 2551 /* 2552 * Tail Loss Probe. 2553 */ 2554 struct rack_sendmap *rsm = NULL; 2555 struct socket *so; 2556 uint32_t amm, old_prr_snd = 0; 2557 uint32_t out, avail; 2558 2559 if (tp->t_timers->tt_flags & TT_STOPPED) { 2560 return (1); 2561 } 2562 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 2563 /* Its not time yet */ 2564 return (0); 2565 } 2566 if (rack_progress_timeout_check(tp)) { 2567 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 2568 return (1); 2569 } 2570 /* 2571 * A TLP timer has expired. We have been idle for 2 rtts. So we now 2572 * need to figure out how to force a full MSS segment out. 2573 */ 2574 rack_log_to_event(rack, RACK_TO_FRM_TLP); 2575 counter_u64_add(rack_tlp_tot, 1); 2576 if (rack->r_state && (rack->r_state != tp->t_state)) 2577 rack_set_state(tp, rack); 2578 so = tp->t_inpcb->inp_socket; 2579 avail = sbavail(&so->so_snd); 2580 out = tp->snd_max - tp->snd_una; 2581 rack->rc_timer_up = 1; 2582 /* 2583 * If we are in recovery we can jazz out a segment if new data is 2584 * present simply by setting rc_prr_sndcnt to a segment. 2585 */ 2586 if ((avail > out) && 2587 ((rack_always_send_oldest == 0) || (TAILQ_EMPTY(&rack->r_ctl.rc_tmap)))) { 2588 /* New data is available */ 2589 amm = avail - out; 2590 if (amm > tp->t_maxseg) { 2591 amm = tp->t_maxseg; 2592 } else if ((amm < tp->t_maxseg) && ((tp->t_flags & TF_NODELAY) == 0)) { 2593 /* not enough to fill a MTU and no-delay is off */ 2594 goto need_retran; 2595 } 2596 if (IN_RECOVERY(tp->t_flags)) { 2597 /* Unlikely */ 2598 old_prr_snd = rack->r_ctl.rc_prr_sndcnt; 2599 if (out + amm <= tp->snd_wnd) 2600 rack->r_ctl.rc_prr_sndcnt = amm; 2601 else 2602 goto need_retran; 2603 } else { 2604 /* Set the send-new override */ 2605 if (out + amm <= tp->snd_wnd) 2606 rack->r_ctl.rc_tlp_new_data = amm; 2607 else 2608 goto need_retran; 2609 } 2610 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 2611 rack->r_ctl.rc_last_tlp_seq = tp->snd_max; 2612 rack->r_ctl.rc_tlpsend = NULL; 2613 counter_u64_add(rack_tlp_newdata, 1); 2614 goto send; 2615 } 2616 need_retran: 2617 /* 2618 * Ok we need to arrange the last un-acked segment to be re-sent, or 2619 * optionally the first un-acked segment. 2620 */ 2621 if (rack_always_send_oldest) 2622 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2623 else { 2624 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next); 2625 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 2626 rsm = rack_find_high_nonack(rack, rsm); 2627 } 2628 } 2629 if (rsm == NULL) { 2630 counter_u64_add(rack_tlp_does_nada, 1); 2631 #ifdef TCP_BLACKBOX 2632 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2633 #endif 2634 goto out; 2635 } 2636 if ((rsm->r_end - rsm->r_start) > tp->t_maxseg) { 2637 /* 2638 * We need to split this the last segment in two. 2639 */ 2640 int32_t idx; 2641 struct rack_sendmap *nrsm; 2642 2643 nrsm = rack_alloc_full_limit(rack); 2644 if (nrsm == NULL) { 2645 /* 2646 * No memory to split, we will just exit and punt 2647 * off to the RXT timer. 2648 */ 2649 counter_u64_add(rack_tlp_does_nada, 1); 2650 goto out; 2651 } 2652 nrsm->r_start = (rsm->r_end - tp->t_maxseg); 2653 nrsm->r_end = rsm->r_end; 2654 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 2655 nrsm->r_flags = rsm->r_flags; 2656 nrsm->r_sndcnt = rsm->r_sndcnt; 2657 nrsm->r_rtr_bytes = 0; 2658 rsm->r_end = nrsm->r_start; 2659 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 2660 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 2661 } 2662 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next); 2663 if (rsm->r_in_tmap) { 2664 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 2665 nrsm->r_in_tmap = 1; 2666 } 2667 rsm->r_flags &= (~RACK_HAS_FIN); 2668 rsm = nrsm; 2669 } 2670 rack->r_ctl.rc_tlpsend = rsm; 2671 rack->r_ctl.rc_tlp_rtx_out = 1; 2672 if (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) { 2673 rack->r_ctl.rc_tlp_seg_send_cnt++; 2674 tp->t_rxtshift++; 2675 } else { 2676 rack->r_ctl.rc_last_tlp_seq = rsm->r_start; 2677 rack->r_ctl.rc_tlp_seg_send_cnt = 1; 2678 } 2679 send: 2680 rack->r_ctl.rc_tlp_send_cnt++; 2681 if (rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) { 2682 /* 2683 * Can't [re]/transmit a segment we have not heard from the 2684 * peer in max times. We need the retransmit timer to take 2685 * over. 2686 */ 2687 restore: 2688 rack->r_ctl.rc_tlpsend = NULL; 2689 if (rsm) 2690 rsm->r_flags &= ~RACK_TLP; 2691 rack->r_ctl.rc_prr_sndcnt = old_prr_snd; 2692 counter_u64_add(rack_tlp_retran_fail, 1); 2693 goto out; 2694 } else if (rsm) { 2695 rsm->r_flags |= RACK_TLP; 2696 } 2697 if (rsm && (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) && 2698 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) { 2699 /* 2700 * We don't want to send a single segment more than the max 2701 * either. 2702 */ 2703 goto restore; 2704 } 2705 rack->r_timer_override = 1; 2706 rack->r_tlp_running = 1; 2707 rack->rc_tlp_in_progress = 1; 2708 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 2709 return (0); 2710 out: 2711 rack->rc_timer_up = 0; 2712 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 2713 return (0); 2714 } 2715 2716 /* 2717 * Delayed ack Timer, here we simply need to setup the 2718 * ACK_NOW flag and remove the DELACK flag. From there 2719 * the output routine will send the ack out. 2720 * 2721 * We only return 1, saying don't proceed, if all timers 2722 * are stopped (destroyed PCB?). 2723 */ 2724 static int 2725 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2726 { 2727 if (tp->t_timers->tt_flags & TT_STOPPED) { 2728 return (1); 2729 } 2730 rack_log_to_event(rack, RACK_TO_FRM_DELACK); 2731 tp->t_flags &= ~TF_DELACK; 2732 tp->t_flags |= TF_ACKNOW; 2733 TCPSTAT_INC(tcps_delack); 2734 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 2735 return (0); 2736 } 2737 2738 /* 2739 * Persists timer, here we simply need to setup the 2740 * FORCE-DATA flag the output routine will send 2741 * the one byte send. 2742 * 2743 * We only return 1, saying don't proceed, if all timers 2744 * are stopped (destroyed PCB?). 2745 */ 2746 static int 2747 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2748 { 2749 struct inpcb *inp; 2750 int32_t retval = 0; 2751 2752 inp = tp->t_inpcb; 2753 2754 if (tp->t_timers->tt_flags & TT_STOPPED) { 2755 return (1); 2756 } 2757 if (rack->rc_in_persist == 0) 2758 return (0); 2759 if (rack_progress_timeout_check(tp)) { 2760 tcp_set_inp_to_drop(inp, ETIMEDOUT); 2761 return (1); 2762 } 2763 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 2764 /* 2765 * Persistence timer into zero window. Force a byte to be output, if 2766 * possible. 2767 */ 2768 TCPSTAT_INC(tcps_persisttimeo); 2769 /* 2770 * Hack: if the peer is dead/unreachable, we do not time out if the 2771 * window is closed. After a full backoff, drop the connection if 2772 * the idle time (no responses to probes) reaches the maximum 2773 * backoff that we would use if retransmitting. 2774 */ 2775 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 2776 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 2777 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { 2778 TCPSTAT_INC(tcps_persistdrop); 2779 retval = 1; 2780 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 2781 goto out; 2782 } 2783 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 2784 tp->snd_una == tp->snd_max) 2785 rack_exit_persist(tp, rack); 2786 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 2787 /* 2788 * If the user has closed the socket then drop a persisting 2789 * connection after a much reduced timeout. 2790 */ 2791 if (tp->t_state > TCPS_CLOSE_WAIT && 2792 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 2793 retval = 1; 2794 TCPSTAT_INC(tcps_persistdrop); 2795 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 2796 goto out; 2797 } 2798 tp->t_flags |= TF_FORCEDATA; 2799 out: 2800 rack_log_to_event(rack, RACK_TO_FRM_PERSIST); 2801 return (retval); 2802 } 2803 2804 /* 2805 * If a keepalive goes off, we had no other timers 2806 * happening. We always return 1 here since this 2807 * routine either drops the connection or sends 2808 * out a segment with respond. 2809 */ 2810 static int 2811 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2812 { 2813 struct tcptemp *t_template; 2814 struct inpcb *inp; 2815 2816 if (tp->t_timers->tt_flags & TT_STOPPED) { 2817 return (1); 2818 } 2819 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 2820 inp = tp->t_inpcb; 2821 rack_log_to_event(rack, RACK_TO_FRM_KEEP); 2822 /* 2823 * Keep-alive timer went off; send something or drop connection if 2824 * idle for too long. 2825 */ 2826 TCPSTAT_INC(tcps_keeptimeo); 2827 if (tp->t_state < TCPS_ESTABLISHED) 2828 goto dropit; 2829 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 2830 tp->t_state <= TCPS_CLOSING) { 2831 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 2832 goto dropit; 2833 /* 2834 * Send a packet designed to force a response if the peer is 2835 * up and reachable: either an ACK if the connection is 2836 * still alive, or an RST if the peer has closed the 2837 * connection due to timeout or reboot. Using sequence 2838 * number tp->snd_una-1 causes the transmitted zero-length 2839 * segment to lie outside the receive window; by the 2840 * protocol spec, this requires the correspondent TCP to 2841 * respond. 2842 */ 2843 TCPSTAT_INC(tcps_keepprobe); 2844 t_template = tcpip_maketemplate(inp); 2845 if (t_template) { 2846 tcp_respond(tp, t_template->tt_ipgen, 2847 &t_template->tt_t, (struct mbuf *)NULL, 2848 tp->rcv_nxt, tp->snd_una - 1, 0); 2849 free(t_template, M_TEMP); 2850 } 2851 } 2852 rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0); 2853 return (1); 2854 dropit: 2855 TCPSTAT_INC(tcps_keepdrops); 2856 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 2857 return (1); 2858 } 2859 2860 /* 2861 * Retransmit helper function, clear up all the ack 2862 * flags and take care of important book keeping. 2863 */ 2864 static void 2865 rack_remxt_tmr(struct tcpcb *tp) 2866 { 2867 /* 2868 * The retransmit timer went off, all sack'd blocks must be 2869 * un-acked. 2870 */ 2871 struct rack_sendmap *rsm, *trsm = NULL; 2872 struct tcp_rack *rack; 2873 int32_t cnt = 0; 2874 2875 rack = (struct tcp_rack *)tp->t_fb_ptr; 2876 rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__); 2877 rack_log_to_event(rack, RACK_TO_FRM_TMR); 2878 if (rack->r_state && (rack->r_state != tp->t_state)) 2879 rack_set_state(tp, rack); 2880 /* 2881 * Ideally we would like to be able to 2882 * mark SACK-PASS on anything not acked here. 2883 * However, if we do that we would burst out 2884 * all that data 1ms apart. This would be unwise, 2885 * so for now we will just let the normal rxt timer 2886 * and tlp timer take care of it. 2887 */ 2888 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) { 2889 if (rsm->r_flags & RACK_ACKED) { 2890 cnt++; 2891 rsm->r_sndcnt = 0; 2892 if (rsm->r_in_tmap == 0) { 2893 /* We must re-add it back to the tlist */ 2894 if (trsm == NULL) { 2895 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 2896 } else { 2897 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 2898 } 2899 rsm->r_in_tmap = 1; 2900 trsm = rsm; 2901 } 2902 } 2903 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 2904 } 2905 /* Clear the count (we just un-acked them) */ 2906 rack->r_ctl.rc_sacked = 0; 2907 /* Clear the tlp rtx mark */ 2908 rack->r_ctl.rc_tlp_rtx_out = 0; 2909 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 2910 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_map); 2911 /* Setup so we send one segment */ 2912 if (rack->r_ctl.rc_prr_sndcnt < tp->t_maxseg) 2913 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg; 2914 rack->r_timer_override = 1; 2915 } 2916 2917 /* 2918 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 2919 * we will setup to retransmit the lowest seq number outstanding. 2920 */ 2921 static int 2922 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2923 { 2924 int32_t rexmt; 2925 struct inpcb *inp; 2926 int32_t retval = 0; 2927 2928 inp = tp->t_inpcb; 2929 if (tp->t_timers->tt_flags & TT_STOPPED) { 2930 return (1); 2931 } 2932 if (rack_progress_timeout_check(tp)) { 2933 tcp_set_inp_to_drop(inp, ETIMEDOUT); 2934 return (1); 2935 } 2936 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 2937 if (TCPS_HAVEESTABLISHED(tp->t_state) && 2938 (tp->snd_una == tp->snd_max)) { 2939 /* Nothing outstanding .. nothing to do */ 2940 return (0); 2941 } 2942 /* 2943 * Retransmission timer went off. Message has not been acked within 2944 * retransmit interval. Back off to a longer retransmit interval 2945 * and retransmit one segment. 2946 */ 2947 if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) { 2948 tp->t_rxtshift = TCP_MAXRXTSHIFT; 2949 TCPSTAT_INC(tcps_timeoutdrop); 2950 retval = 1; 2951 tcp_set_inp_to_drop(rack->rc_inp, 2952 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 2953 goto out; 2954 } 2955 rack_remxt_tmr(tp); 2956 if (tp->t_state == TCPS_SYN_SENT) { 2957 /* 2958 * If the SYN was retransmitted, indicate CWND to be limited 2959 * to 1 segment in cc_conn_init(). 2960 */ 2961 tp->snd_cwnd = 1; 2962 } else if (tp->t_rxtshift == 1) { 2963 /* 2964 * first retransmit; record ssthresh and cwnd so they can be 2965 * recovered if this turns out to be a "bad" retransmit. A 2966 * retransmit is considered "bad" if an ACK for this segment 2967 * is received within RTT/2 interval; the assumption here is 2968 * that the ACK was already in flight. See "On Estimating 2969 * End-to-End Network Path Properties" by Allman and Paxson 2970 * for more details. 2971 */ 2972 tp->snd_cwnd_prev = tp->snd_cwnd; 2973 tp->snd_ssthresh_prev = tp->snd_ssthresh; 2974 tp->snd_recover_prev = tp->snd_recover; 2975 if (IN_FASTRECOVERY(tp->t_flags)) 2976 tp->t_flags |= TF_WASFRECOVERY; 2977 else 2978 tp->t_flags &= ~TF_WASFRECOVERY; 2979 if (IN_CONGRECOVERY(tp->t_flags)) 2980 tp->t_flags |= TF_WASCRECOVERY; 2981 else 2982 tp->t_flags &= ~TF_WASCRECOVERY; 2983 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); 2984 tp->t_flags |= TF_PREVVALID; 2985 } else 2986 tp->t_flags &= ~TF_PREVVALID; 2987 TCPSTAT_INC(tcps_rexmttimeo); 2988 if ((tp->t_state == TCPS_SYN_SENT) || 2989 (tp->t_state == TCPS_SYN_RECEIVED)) 2990 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_syn_backoff[tp->t_rxtshift]); 2991 else 2992 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; 2993 TCPT_RANGESET(tp->t_rxtcur, rexmt, 2994 max(MSEC_2_TICKS(rack_rto_min), rexmt), 2995 MSEC_2_TICKS(rack_rto_max)); 2996 /* 2997 * We enter the path for PLMTUD if connection is established or, if 2998 * connection is FIN_WAIT_1 status, reason for the last is that if 2999 * amount of data we send is very small, we could send it in couple 3000 * of packets and process straight to FIN. In that case we won't 3001 * catch ESTABLISHED state. 3002 */ 3003 if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED)) 3004 || (tp->t_state == TCPS_FIN_WAIT_1))) { 3005 #ifdef INET6 3006 int32_t isipv6; 3007 #endif 3008 3009 /* 3010 * Idea here is that at each stage of mtu probe (usually, 3011 * 1448 -> 1188 -> 524) should be given 2 chances to recover 3012 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 3013 * should take care of that. 3014 */ 3015 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 3016 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 3017 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 3018 tp->t_rxtshift % 2 == 0)) { 3019 /* 3020 * Enter Path MTU Black-hole Detection mechanism: - 3021 * Disable Path MTU Discovery (IP "DF" bit). - 3022 * Reduce MTU to lower value than what we negotiated 3023 * with peer. 3024 */ 3025 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 3026 /* Record that we may have found a black hole. */ 3027 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 3028 /* Keep track of previous MSS. */ 3029 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 3030 } 3031 3032 /* 3033 * Reduce the MSS to blackhole value or to the 3034 * default in an attempt to retransmit. 3035 */ 3036 #ifdef INET6 3037 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0; 3038 if (isipv6 && 3039 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 3040 /* Use the sysctl tuneable blackhole MSS. */ 3041 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 3042 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 3043 } else if (isipv6) { 3044 /* Use the default MSS. */ 3045 tp->t_maxseg = V_tcp_v6mssdflt; 3046 /* 3047 * Disable Path MTU Discovery when we switch 3048 * to minmss. 3049 */ 3050 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 3051 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 3052 } 3053 #endif 3054 #if defined(INET6) && defined(INET) 3055 else 3056 #endif 3057 #ifdef INET 3058 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 3059 /* Use the sysctl tuneable blackhole MSS. */ 3060 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 3061 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 3062 } else { 3063 /* Use the default MSS. */ 3064 tp->t_maxseg = V_tcp_mssdflt; 3065 /* 3066 * Disable Path MTU Discovery when we switch 3067 * to minmss. 3068 */ 3069 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 3070 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 3071 } 3072 #endif 3073 } else { 3074 /* 3075 * If further retransmissions are still unsuccessful 3076 * with a lowered MTU, maybe this isn't a blackhole 3077 * and we restore the previous MSS and blackhole 3078 * detection flags. The limit '6' is determined by 3079 * giving each probe stage (1448, 1188, 524) 2 3080 * chances to recover. 3081 */ 3082 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 3083 (tp->t_rxtshift >= 6)) { 3084 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 3085 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 3086 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 3087 TCPSTAT_INC(tcps_pmtud_blackhole_failed); 3088 } 3089 } 3090 } 3091 /* 3092 * Disable RFC1323 and SACK if we haven't got any response to our 3093 * third SYN to work-around some broken terminal servers (most of 3094 * which have hopefully been retired) that have bad VJ header 3095 * compression code which trashes TCP segments containing 3096 * unknown-to-them TCP options. 3097 */ 3098 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 3099 (tp->t_rxtshift == 3)) 3100 tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_SACK_PERMIT); 3101 /* 3102 * If we backed off this far, our srtt estimate is probably bogus. 3103 * Clobber it so we'll take the next rtt measurement as our srtt; 3104 * move the current srtt into rttvar to keep the current retransmit 3105 * times until then. 3106 */ 3107 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 3108 #ifdef INET6 3109 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 3110 in6_losing(tp->t_inpcb); 3111 else 3112 #endif 3113 in_losing(tp->t_inpcb); 3114 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); 3115 tp->t_srtt = 0; 3116 } 3117 if (rack_use_sack_filter) 3118 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 3119 tp->snd_recover = tp->snd_max; 3120 tp->t_flags |= TF_ACKNOW; 3121 tp->t_rtttime = 0; 3122 rack_cong_signal(tp, NULL, CC_RTO); 3123 out: 3124 return (retval); 3125 } 3126 3127 static int 3128 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling) 3129 { 3130 int32_t ret = 0; 3131 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 3132 3133 if (timers == 0) { 3134 return (0); 3135 } 3136 if (tp->t_state == TCPS_LISTEN) { 3137 /* no timers on listen sockets */ 3138 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 3139 return (0); 3140 return (1); 3141 } 3142 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 3143 uint32_t left; 3144 3145 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 3146 ret = -1; 3147 rack_log_to_processing(rack, cts, ret, 0); 3148 return (0); 3149 } 3150 if (hpts_calling == 0) { 3151 ret = -2; 3152 rack_log_to_processing(rack, cts, ret, 0); 3153 return (0); 3154 } 3155 /* 3156 * Ok our timer went off early and we are not paced false 3157 * alarm, go back to sleep. 3158 */ 3159 ret = -3; 3160 left = rack->r_ctl.rc_timer_exp - cts; 3161 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 3162 rack_log_to_processing(rack, cts, ret, left); 3163 rack->rc_last_pto_set = 0; 3164 return (1); 3165 } 3166 rack->rc_tmr_stopped = 0; 3167 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 3168 if (timers & PACE_TMR_DELACK) { 3169 ret = rack_timeout_delack(tp, rack, cts); 3170 } else if (timers & PACE_TMR_RACK) { 3171 ret = rack_timeout_rack(tp, rack, cts); 3172 } else if (timers & PACE_TMR_TLP) { 3173 ret = rack_timeout_tlp(tp, rack, cts); 3174 } else if (timers & PACE_TMR_RXT) { 3175 ret = rack_timeout_rxt(tp, rack, cts); 3176 } else if (timers & PACE_TMR_PERSIT) { 3177 ret = rack_timeout_persist(tp, rack, cts); 3178 } else if (timers & PACE_TMR_KEEP) { 3179 ret = rack_timeout_keepalive(tp, rack, cts); 3180 } 3181 rack_log_to_processing(rack, cts, ret, timers); 3182 return (ret); 3183 } 3184 3185 static void 3186 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 3187 { 3188 uint8_t hpts_removed = 0; 3189 3190 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 3191 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 3192 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 3193 hpts_removed = 1; 3194 } 3195 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 3196 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 3197 if (rack->rc_inp->inp_in_hpts && 3198 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 3199 /* 3200 * Canceling timer's when we have no output being 3201 * paced. We also must remove ourselves from the 3202 * hpts. 3203 */ 3204 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 3205 hpts_removed = 1; 3206 } 3207 rack_log_to_cancel(rack, hpts_removed, line); 3208 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 3209 } 3210 } 3211 3212 static void 3213 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 3214 { 3215 return; 3216 } 3217 3218 static int 3219 rack_stopall(struct tcpcb *tp) 3220 { 3221 struct tcp_rack *rack; 3222 rack = (struct tcp_rack *)tp->t_fb_ptr; 3223 rack->t_timers_stopped = 1; 3224 return (0); 3225 } 3226 3227 static void 3228 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 3229 { 3230 return; 3231 } 3232 3233 static int 3234 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 3235 { 3236 return (0); 3237 } 3238 3239 static void 3240 rack_stop_all_timers(struct tcpcb *tp) 3241 { 3242 struct tcp_rack *rack; 3243 3244 /* 3245 * Assure no timers are running. 3246 */ 3247 if (tcp_timer_active(tp, TT_PERSIST)) { 3248 /* We enter in persists, set the flag appropriately */ 3249 rack = (struct tcp_rack *)tp->t_fb_ptr; 3250 rack->rc_in_persist = 1; 3251 } 3252 tcp_timer_suspend(tp, TT_PERSIST); 3253 tcp_timer_suspend(tp, TT_REXMT); 3254 tcp_timer_suspend(tp, TT_KEEP); 3255 tcp_timer_suspend(tp, TT_DELACK); 3256 } 3257 3258 static void 3259 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 3260 struct rack_sendmap *rsm, uint32_t ts) 3261 { 3262 int32_t idx; 3263 3264 rsm->r_rtr_cnt++; 3265 rsm->r_sndcnt++; 3266 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 3267 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 3268 rsm->r_flags |= RACK_OVERMAX; 3269 } 3270 if ((rsm->r_rtr_cnt > 1) && (rack->r_tlp_running == 0)) { 3271 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 3272 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 3273 } 3274 idx = rsm->r_rtr_cnt - 1; 3275 rsm->r_tim_lastsent[idx] = ts; 3276 if (rsm->r_flags & RACK_ACKED) { 3277 /* Problably MTU discovery messing with us */ 3278 rsm->r_flags &= ~RACK_ACKED; 3279 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 3280 } 3281 if (rsm->r_in_tmap) { 3282 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3283 } 3284 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3285 rsm->r_in_tmap = 1; 3286 if (rsm->r_flags & RACK_SACK_PASSED) { 3287 /* We have retransmitted due to the SACK pass */ 3288 rsm->r_flags &= ~RACK_SACK_PASSED; 3289 rsm->r_flags |= RACK_WAS_SACKPASS; 3290 } 3291 /* Update memory for next rtr */ 3292 rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next); 3293 } 3294 3295 3296 static uint32_t 3297 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 3298 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp) 3299 { 3300 /* 3301 * We (re-)transmitted starting at rsm->r_start for some length 3302 * (possibly less than r_end. 3303 */ 3304 struct rack_sendmap *nrsm; 3305 uint32_t c_end; 3306 int32_t len; 3307 int32_t idx; 3308 3309 len = *lenp; 3310 c_end = rsm->r_start + len; 3311 if (SEQ_GEQ(c_end, rsm->r_end)) { 3312 /* 3313 * We retransmitted the whole piece or more than the whole 3314 * slopping into the next rsm. 3315 */ 3316 rack_update_rsm(tp, rack, rsm, ts); 3317 if (c_end == rsm->r_end) { 3318 *lenp = 0; 3319 return (0); 3320 } else { 3321 int32_t act_len; 3322 3323 /* Hangs over the end return whats left */ 3324 act_len = rsm->r_end - rsm->r_start; 3325 *lenp = (len - act_len); 3326 return (rsm->r_end); 3327 } 3328 /* We don't get out of this block. */ 3329 } 3330 /* 3331 * Here we retransmitted less than the whole thing which means we 3332 * have to split this into what was transmitted and what was not. 3333 */ 3334 nrsm = rack_alloc_full_limit(rack); 3335 if (nrsm == NULL) { 3336 /* 3337 * We can't get memory, so lets not proceed. 3338 */ 3339 *lenp = 0; 3340 return (0); 3341 } 3342 /* 3343 * So here we are going to take the original rsm and make it what we 3344 * retransmitted. nrsm will be the tail portion we did not 3345 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 3346 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 3347 * 1, 6 and the new piece will be 6, 11. 3348 */ 3349 nrsm->r_start = c_end; 3350 nrsm->r_end = rsm->r_end; 3351 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 3352 nrsm->r_flags = rsm->r_flags; 3353 nrsm->r_sndcnt = rsm->r_sndcnt; 3354 nrsm->r_rtr_bytes = 0; 3355 rsm->r_end = c_end; 3356 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 3357 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 3358 } 3359 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next); 3360 if (rsm->r_in_tmap) { 3361 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 3362 nrsm->r_in_tmap = 1; 3363 } 3364 rsm->r_flags &= (~RACK_HAS_FIN); 3365 rack_update_rsm(tp, rack, rsm, ts); 3366 *lenp = 0; 3367 return (0); 3368 } 3369 3370 3371 static void 3372 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 3373 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts, 3374 uint8_t pass, struct rack_sendmap *hintrsm) 3375 { 3376 struct tcp_rack *rack; 3377 struct rack_sendmap *rsm, *nrsm; 3378 register uint32_t snd_max, snd_una; 3379 int32_t idx; 3380 3381 /* 3382 * Add to the RACK log of packets in flight or retransmitted. If 3383 * there is a TS option we will use the TS echoed, if not we will 3384 * grab a TS. 3385 * 3386 * Retransmissions will increment the count and move the ts to its 3387 * proper place. Note that if options do not include TS's then we 3388 * won't be able to effectively use the ACK for an RTT on a retran. 3389 * 3390 * Notes about r_start and r_end. Lets consider a send starting at 3391 * sequence 1 for 10 bytes. In such an example the r_start would be 3392 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 3393 * This means that r_end is actually the first sequence for the next 3394 * slot (11). 3395 * 3396 */ 3397 /* 3398 * If err is set what do we do XXXrrs? should we not add the thing? 3399 * -- i.e. return if err != 0 or should we pretend we sent it? -- 3400 * i.e. proceed with add ** do this for now. 3401 */ 3402 INP_WLOCK_ASSERT(tp->t_inpcb); 3403 if (err) 3404 /* 3405 * We don't log errors -- we could but snd_max does not 3406 * advance in this case either. 3407 */ 3408 return; 3409 3410 if (th_flags & TH_RST) { 3411 /* 3412 * We don't log resets and we return immediately from 3413 * sending 3414 */ 3415 return; 3416 } 3417 rack = (struct tcp_rack *)tp->t_fb_ptr; 3418 snd_una = tp->snd_una; 3419 if (SEQ_LEQ((seq_out + len), snd_una)) { 3420 /* Are sending an old segment to induce an ack (keep-alive)? */ 3421 return; 3422 } 3423 if (SEQ_LT(seq_out, snd_una)) { 3424 /* huh? should we panic? */ 3425 uint32_t end; 3426 3427 end = seq_out + len; 3428 seq_out = snd_una; 3429 len = end - seq_out; 3430 } 3431 snd_max = tp->snd_max; 3432 if (th_flags & (TH_SYN | TH_FIN)) { 3433 /* 3434 * The call to rack_log_output is made before bumping 3435 * snd_max. This means we can record one extra byte on a SYN 3436 * or FIN if seq_out is adding more on and a FIN is present 3437 * (and we are not resending). 3438 */ 3439 if (th_flags & TH_SYN) 3440 len++; 3441 if (th_flags & TH_FIN) 3442 len++; 3443 if (SEQ_LT(snd_max, tp->snd_nxt)) { 3444 /* 3445 * The add/update as not been done for the FIN/SYN 3446 * yet. 3447 */ 3448 snd_max = tp->snd_nxt; 3449 } 3450 } 3451 if (len == 0) { 3452 /* We don't log zero window probes */ 3453 return; 3454 } 3455 rack->r_ctl.rc_time_last_sent = ts; 3456 if (IN_RECOVERY(tp->t_flags)) { 3457 rack->r_ctl.rc_prr_out += len; 3458 } 3459 /* First question is it a retransmission? */ 3460 if (seq_out == snd_max) { 3461 again: 3462 rsm = rack_alloc(rack); 3463 if (rsm == NULL) { 3464 /* 3465 * Hmm out of memory and the tcb got destroyed while 3466 * we tried to wait. 3467 */ 3468 return; 3469 } 3470 if (th_flags & TH_FIN) { 3471 rsm->r_flags = RACK_HAS_FIN; 3472 } else { 3473 rsm->r_flags = 0; 3474 } 3475 rsm->r_tim_lastsent[0] = ts; 3476 rsm->r_rtr_cnt = 1; 3477 rsm->r_rtr_bytes = 0; 3478 rsm->r_start = seq_out; 3479 rsm->r_end = rsm->r_start + len; 3480 rsm->r_sndcnt = 0; 3481 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next); 3482 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3483 rsm->r_in_tmap = 1; 3484 return; 3485 } 3486 /* 3487 * If we reach here its a retransmission and we need to find it. 3488 */ 3489 more: 3490 if (hintrsm && (hintrsm->r_start == seq_out)) { 3491 rsm = hintrsm; 3492 hintrsm = NULL; 3493 } else if (rack->r_ctl.rc_next) { 3494 /* We have a hint from a previous run */ 3495 rsm = rack->r_ctl.rc_next; 3496 } else { 3497 /* No hints sorry */ 3498 rsm = NULL; 3499 } 3500 if ((rsm) && (rsm->r_start == seq_out)) { 3501 /* 3502 * We used rc_next or hintrsm to retransmit, hopefully the 3503 * likely case. 3504 */ 3505 seq_out = rack_update_entry(tp, rack, rsm, ts, &len); 3506 if (len == 0) { 3507 return; 3508 } else { 3509 goto more; 3510 } 3511 } 3512 /* Ok it was not the last pointer go through it the hard way. */ 3513 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) { 3514 if (rsm->r_start == seq_out) { 3515 seq_out = rack_update_entry(tp, rack, rsm, ts, &len); 3516 rack->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next); 3517 if (len == 0) { 3518 return; 3519 } else { 3520 continue; 3521 } 3522 } 3523 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 3524 /* Transmitted within this piece */ 3525 /* 3526 * Ok we must split off the front and then let the 3527 * update do the rest 3528 */ 3529 nrsm = rack_alloc_full_limit(rack); 3530 if (nrsm == NULL) { 3531 rack_update_rsm(tp, rack, rsm, ts); 3532 return; 3533 } 3534 /* 3535 * copy rsm to nrsm and then trim the front of rsm 3536 * to not include this part. 3537 */ 3538 nrsm->r_start = seq_out; 3539 nrsm->r_end = rsm->r_end; 3540 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 3541 nrsm->r_flags = rsm->r_flags; 3542 nrsm->r_sndcnt = rsm->r_sndcnt; 3543 nrsm->r_rtr_bytes = 0; 3544 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 3545 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 3546 } 3547 rsm->r_end = nrsm->r_start; 3548 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next); 3549 if (rsm->r_in_tmap) { 3550 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 3551 nrsm->r_in_tmap = 1; 3552 } 3553 rsm->r_flags &= (~RACK_HAS_FIN); 3554 seq_out = rack_update_entry(tp, rack, nrsm, ts, &len); 3555 if (len == 0) { 3556 return; 3557 } 3558 } 3559 } 3560 /* 3561 * Hmm not found in map did they retransmit both old and on into the 3562 * new? 3563 */ 3564 if (seq_out == tp->snd_max) { 3565 goto again; 3566 } else if (SEQ_LT(seq_out, tp->snd_max)) { 3567 #ifdef INVARIANTS 3568 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 3569 seq_out, len, tp->snd_una, tp->snd_max); 3570 printf("Starting Dump of all rack entries\n"); 3571 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_map, r_next) { 3572 printf("rsm:%p start:%u end:%u\n", 3573 rsm, rsm->r_start, rsm->r_end); 3574 } 3575 printf("Dump complete\n"); 3576 panic("seq_out not found rack:%p tp:%p", 3577 rack, tp); 3578 #endif 3579 } else { 3580 #ifdef INVARIANTS 3581 /* 3582 * Hmm beyond sndmax? (only if we are using the new rtt-pack 3583 * flag) 3584 */ 3585 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 3586 seq_out, len, tp->snd_max, tp); 3587 #endif 3588 } 3589 } 3590 3591 /* 3592 * Record one of the RTT updates from an ack into 3593 * our sample structure. 3594 */ 3595 static void 3596 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt) 3597 { 3598 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 3599 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 3600 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 3601 } 3602 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 3603 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 3604 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 3605 } 3606 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 3607 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 3608 rack->r_ctl.rack_rs.rs_rtt_cnt++; 3609 } 3610 3611 /* 3612 * Collect new round-trip time estimate 3613 * and update averages and current timeout. 3614 */ 3615 static void 3616 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 3617 { 3618 int32_t delta; 3619 uint32_t o_srtt, o_var; 3620 int32_t rtt; 3621 3622 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 3623 /* No valid sample */ 3624 return; 3625 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 3626 /* We are to use the lowest RTT seen in a single ack */ 3627 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 3628 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 3629 /* We are to use the highest RTT seen in a single ack */ 3630 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 3631 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 3632 /* We are to use the average RTT seen in a single ack */ 3633 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 3634 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 3635 } else { 3636 #ifdef INVARIANTS 3637 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 3638 #endif 3639 return; 3640 } 3641 if (rtt == 0) 3642 rtt = 1; 3643 rack_log_rtt_sample(rack, rtt); 3644 o_srtt = tp->t_srtt; 3645 o_var = tp->t_rttvar; 3646 rack = (struct tcp_rack *)tp->t_fb_ptr; 3647 if (tp->t_srtt != 0) { 3648 /* 3649 * srtt is stored as fixed point with 5 bits after the 3650 * binary point (i.e., scaled by 8). The following magic is 3651 * equivalent to the smoothing algorithm in rfc793 with an 3652 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point). 3653 * Adjust rtt to origin 0. 3654 */ 3655 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3656 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3657 3658 tp->t_srtt += delta; 3659 if (tp->t_srtt <= 0) 3660 tp->t_srtt = 1; 3661 3662 /* 3663 * We accumulate a smoothed rtt variance (actually, a 3664 * smoothed mean difference), then set the retransmit timer 3665 * to smoothed rtt + 4 times the smoothed variance. rttvar 3666 * is stored as fixed point with 4 bits after the binary 3667 * point (scaled by 16). The following is equivalent to 3668 * rfc793 smoothing with an alpha of .75 (rttvar = 3669 * rttvar*3/4 + |delta| / 4). This replaces rfc793's 3670 * wired-in beta. 3671 */ 3672 if (delta < 0) 3673 delta = -delta; 3674 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3675 tp->t_rttvar += delta; 3676 if (tp->t_rttvar <= 0) 3677 tp->t_rttvar = 1; 3678 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3679 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3680 } else { 3681 /* 3682 * No rtt measurement yet - use the unsmoothed rtt. Set the 3683 * variance to half the rtt (so our first retransmit happens 3684 * at 3*rtt). 3685 */ 3686 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3687 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3688 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3689 } 3690 TCPSTAT_INC(tcps_rttupdated); 3691 rack_log_rtt_upd(tp, rack, rtt, o_srtt, o_var); 3692 tp->t_rttupdated++; 3693 #ifdef NETFLIX_STATS 3694 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 3695 #endif 3696 tp->t_rxtshift = 0; 3697 3698 /* 3699 * the retransmit should happen at rtt + 4 * rttvar. Because of the 3700 * way we do the smoothing, srtt and rttvar will each average +1/2 3701 * tick of bias. When we compute the retransmit timer, we want 1/2 3702 * tick of rounding and 1 extra tick because of +-1/2 tick 3703 * uncertainty in the firing of the timer. The bias will give us 3704 * exactly the 1.5 tick we need. But, because the bias is 3705 * statistical, we have to test that we don't drop below the minimum 3706 * feasible timer (which is 2 ticks). 3707 */ 3708 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3709 max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max)); 3710 tp->t_softerror = 0; 3711 } 3712 3713 static void 3714 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm, 3715 uint32_t t, uint32_t cts) 3716 { 3717 /* 3718 * For this RSM, we acknowledged the data from a previous 3719 * transmission, not the last one we made. This means we did a false 3720 * retransmit. 3721 */ 3722 struct tcp_rack *rack; 3723 3724 if (rsm->r_flags & RACK_HAS_FIN) { 3725 /* 3726 * The sending of the FIN often is multiple sent when we 3727 * have everything outstanding ack'd. We ignore this case 3728 * since its over now. 3729 */ 3730 return; 3731 } 3732 if (rsm->r_flags & RACK_TLP) { 3733 /* 3734 * We expect TLP's to have this occur. 3735 */ 3736 return; 3737 } 3738 rack = (struct tcp_rack *)tp->t_fb_ptr; 3739 /* should we undo cc changes and exit recovery? */ 3740 if (IN_RECOVERY(tp->t_flags)) { 3741 if (rack->r_ctl.rc_rsm_start == rsm->r_start) { 3742 /* 3743 * Undo what we ratched down and exit recovery if 3744 * possible 3745 */ 3746 EXIT_RECOVERY(tp->t_flags); 3747 tp->snd_recover = tp->snd_una; 3748 if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd) 3749 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at; 3750 if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh) 3751 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at; 3752 } 3753 } 3754 if (rsm->r_flags & RACK_WAS_SACKPASS) { 3755 /* 3756 * We retransmitted based on a sack and the earlier 3757 * retransmission ack'd it - re-ordering is occuring. 3758 */ 3759 counter_u64_add(rack_reorder_seen, 1); 3760 rack->r_ctl.rc_reorder_ts = cts; 3761 } 3762 counter_u64_add(rack_badfr, 1); 3763 counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start)); 3764 } 3765 3766 3767 static int 3768 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 3769 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type) 3770 { 3771 int32_t i; 3772 uint32_t t; 3773 3774 if (rsm->r_flags & RACK_ACKED) 3775 /* Already done */ 3776 return (0); 3777 3778 3779 if ((rsm->r_rtr_cnt == 1) || 3780 ((ack_type == CUM_ACKED) && 3781 (to->to_flags & TOF_TS) && 3782 (to->to_tsecr) && 3783 (rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] == to->to_tsecr)) 3784 ) { 3785 /* 3786 * We will only find a matching timestamp if its cum-acked. 3787 * But if its only one retransmission its for-sure matching 3788 * :-) 3789 */ 3790 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 3791 if ((int)t <= 0) 3792 t = 1; 3793 if (!tp->t_rttlow || tp->t_rttlow > t) 3794 tp->t_rttlow = t; 3795 if (!rack->r_ctl.rc_rack_min_rtt || 3796 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 3797 rack->r_ctl.rc_rack_min_rtt = t; 3798 if (rack->r_ctl.rc_rack_min_rtt == 0) { 3799 rack->r_ctl.rc_rack_min_rtt = 1; 3800 } 3801 } 3802 tcp_rack_xmit_timer(rack, TCP_TS_TO_TICKS(t) + 1); 3803 if ((rsm->r_flags & RACK_TLP) && 3804 (!IN_RECOVERY(tp->t_flags))) { 3805 /* Segment was a TLP and our retrans matched */ 3806 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 3807 rack->r_ctl.rc_rsm_start = tp->snd_max; 3808 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 3809 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 3810 rack_cong_signal(tp, NULL, CC_NDUPACK); 3811 /* 3812 * When we enter recovery we need to assure 3813 * we send one packet. 3814 */ 3815 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg; 3816 } else 3817 rack->r_ctl.rc_tlp_rtx_out = 0; 3818 } 3819 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 3820 /* New more recent rack_tmit_time */ 3821 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 3822 rack->rc_rack_rtt = t; 3823 } 3824 return (1); 3825 } 3826 /* 3827 * We clear the soft/rxtshift since we got an ack. 3828 * There is no assurance we will call the commit() function 3829 * so we need to clear these to avoid incorrect handling. 3830 */ 3831 tp->t_rxtshift = 0; 3832 tp->t_softerror = 0; 3833 if ((to->to_flags & TOF_TS) && 3834 (ack_type == CUM_ACKED) && 3835 (to->to_tsecr) && 3836 ((rsm->r_flags & (RACK_DEFERRED | RACK_OVERMAX)) == 0)) { 3837 /* 3838 * Now which timestamp does it match? In this block the ACK 3839 * must be coming from a previous transmission. 3840 */ 3841 for (i = 0; i < rsm->r_rtr_cnt; i++) { 3842 if (rsm->r_tim_lastsent[i] == to->to_tsecr) { 3843 t = cts - rsm->r_tim_lastsent[i]; 3844 if ((int)t <= 0) 3845 t = 1; 3846 if ((i + 1) < rsm->r_rtr_cnt) { 3847 /* Likely */ 3848 rack_earlier_retran(tp, rsm, t, cts); 3849 } 3850 if (!tp->t_rttlow || tp->t_rttlow > t) 3851 tp->t_rttlow = t; 3852 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 3853 rack->r_ctl.rc_rack_min_rtt = t; 3854 if (rack->r_ctl.rc_rack_min_rtt == 0) { 3855 rack->r_ctl.rc_rack_min_rtt = 1; 3856 } 3857 } 3858 /* 3859 * Note the following calls to 3860 * tcp_rack_xmit_timer() are being commented 3861 * out for now. They give us no more accuracy 3862 * and often lead to a wrong choice. We have 3863 * enough samples that have not been 3864 * retransmitted. I leave the commented out 3865 * code in here in case in the future we 3866 * decide to add it back (though I can't forsee 3867 * doing that). That way we will easily see 3868 * where they need to be placed. 3869 */ 3870 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 3871 rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 3872 /* New more recent rack_tmit_time */ 3873 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 3874 rack->rc_rack_rtt = t; 3875 } 3876 return (1); 3877 } 3878 } 3879 goto ts_not_found; 3880 } else { 3881 /* 3882 * Ok its a SACK block that we retransmitted. or a windows 3883 * machine without timestamps. We can tell nothing from the 3884 * time-stamp since its not there or the time the peer last 3885 * recieved a segment that moved forward its cum-ack point. 3886 */ 3887 ts_not_found: 3888 i = rsm->r_rtr_cnt - 1; 3889 t = cts - rsm->r_tim_lastsent[i]; 3890 if ((int)t <= 0) 3891 t = 1; 3892 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 3893 /* 3894 * We retransmitted and the ack came back in less 3895 * than the smallest rtt we have observed. We most 3896 * likey did an improper retransmit as outlined in 3897 * 4.2 Step 3 point 2 in the rack-draft. 3898 */ 3899 i = rsm->r_rtr_cnt - 2; 3900 t = cts - rsm->r_tim_lastsent[i]; 3901 rack_earlier_retran(tp, rsm, t, cts); 3902 } else if (rack->r_ctl.rc_rack_min_rtt) { 3903 /* 3904 * We retransmitted it and the retransmit did the 3905 * job. 3906 */ 3907 if (!rack->r_ctl.rc_rack_min_rtt || 3908 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 3909 rack->r_ctl.rc_rack_min_rtt = t; 3910 if (rack->r_ctl.rc_rack_min_rtt == 0) { 3911 rack->r_ctl.rc_rack_min_rtt = 1; 3912 } 3913 } 3914 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) { 3915 /* New more recent rack_tmit_time */ 3916 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i]; 3917 rack->rc_rack_rtt = t; 3918 } 3919 return (1); 3920 } 3921 } 3922 return (0); 3923 } 3924 3925 /* 3926 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 3927 */ 3928 static void 3929 rack_log_sack_passed(struct tcpcb *tp, 3930 struct tcp_rack *rack, struct rack_sendmap *rsm) 3931 { 3932 struct rack_sendmap *nrsm; 3933 uint32_t ts; 3934 int32_t idx; 3935 3936 idx = rsm->r_rtr_cnt - 1; 3937 ts = rsm->r_tim_lastsent[idx]; 3938 nrsm = rsm; 3939 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 3940 rack_head, r_tnext) { 3941 if (nrsm == rsm) { 3942 /* Skip orginal segment he is acked */ 3943 continue; 3944 } 3945 if (nrsm->r_flags & RACK_ACKED) { 3946 /* Skip ack'd segments */ 3947 continue; 3948 } 3949 if (nrsm->r_flags & RACK_SACK_PASSED) { 3950 /* 3951 * We found one that is already marked 3952 * passed, we have been here before and 3953 * so all others below this are marked. 3954 */ 3955 break; 3956 } 3957 idx = nrsm->r_rtr_cnt - 1; 3958 if (ts == nrsm->r_tim_lastsent[idx]) { 3959 /* 3960 * For this case lets use seq no, if we sent in a 3961 * big block (TSO) we would have a bunch of segments 3962 * sent at the same time. 3963 * 3964 * We would only get a report if its SEQ is earlier. 3965 * If we have done multiple retransmits the times 3966 * would not be equal. 3967 */ 3968 if (SEQ_LT(nrsm->r_start, rsm->r_start)) { 3969 nrsm->r_flags |= RACK_SACK_PASSED; 3970 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 3971 } 3972 } else { 3973 /* 3974 * Here they were sent at different times, not a big 3975 * block. Since we transmitted this one later and 3976 * see it sack'd then this must also be missing (or 3977 * we would have gotten a sack block for it) 3978 */ 3979 nrsm->r_flags |= RACK_SACK_PASSED; 3980 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 3981 } 3982 } 3983 } 3984 3985 static uint32_t 3986 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 3987 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts) 3988 { 3989 int32_t idx; 3990 int32_t times = 0; 3991 uint32_t start, end, changed = 0; 3992 struct rack_sendmap *rsm, *nrsm; 3993 int32_t used_ref = 1; 3994 3995 start = sack->start; 3996 end = sack->end; 3997 rsm = *prsm; 3998 if (rsm && SEQ_LT(start, rsm->r_start)) { 3999 TAILQ_FOREACH_REVERSE_FROM(rsm, &rack->r_ctl.rc_map, rack_head, r_next) { 4000 if (SEQ_GEQ(start, rsm->r_start) && 4001 SEQ_LT(start, rsm->r_end)) { 4002 goto do_rest_ofb; 4003 } 4004 } 4005 } 4006 if (rsm == NULL) { 4007 start_at_beginning: 4008 rsm = NULL; 4009 used_ref = 0; 4010 } 4011 /* First lets locate the block where this guy is */ 4012 TAILQ_FOREACH_FROM(rsm, &rack->r_ctl.rc_map, r_next) { 4013 if (SEQ_GEQ(start, rsm->r_start) && 4014 SEQ_LT(start, rsm->r_end)) { 4015 break; 4016 } 4017 } 4018 do_rest_ofb: 4019 if (rsm == NULL) { 4020 /* 4021 * This happens when we get duplicate sack blocks with the 4022 * same end. For example SACK 4: 100 SACK 3: 100 The sort 4023 * will not change there location so we would just start at 4024 * the end of the first one and get lost. 4025 */ 4026 if (tp->t_flags & TF_SENTFIN) { 4027 /* 4028 * Check to see if we have not logged the FIN that 4029 * went out. 4030 */ 4031 nrsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next); 4032 if (nrsm && (nrsm->r_end + 1) == tp->snd_max) { 4033 /* 4034 * Ok we did not get the FIN logged. 4035 */ 4036 nrsm->r_end++; 4037 rsm = nrsm; 4038 goto do_rest_ofb; 4039 } 4040 } 4041 if (times == 1) { 4042 #ifdef INVARIANTS 4043 panic("tp:%p rack:%p sack:%p to:%p prsm:%p", 4044 tp, rack, sack, to, prsm); 4045 #else 4046 goto out; 4047 #endif 4048 } 4049 times++; 4050 counter_u64_add(rack_sack_proc_restart, 1); 4051 goto start_at_beginning; 4052 } 4053 /* Ok we have an ACK for some piece of rsm */ 4054 if (rsm->r_start != start) { 4055 /* 4056 * Need to split this in two pieces the before and after. 4057 */ 4058 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 4059 if (nrsm == NULL) { 4060 /* 4061 * failed XXXrrs what can we do but loose the sack 4062 * info? 4063 */ 4064 goto out; 4065 } 4066 nrsm->r_start = start; 4067 nrsm->r_rtr_bytes = 0; 4068 nrsm->r_end = rsm->r_end; 4069 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 4070 nrsm->r_flags = rsm->r_flags; 4071 nrsm->r_sndcnt = rsm->r_sndcnt; 4072 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 4073 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 4074 } 4075 rsm->r_end = nrsm->r_start; 4076 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next); 4077 if (rsm->r_in_tmap) { 4078 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 4079 nrsm->r_in_tmap = 1; 4080 } 4081 rsm->r_flags &= (~RACK_HAS_FIN); 4082 rsm = nrsm; 4083 } 4084 if (SEQ_GEQ(end, rsm->r_end)) { 4085 /* 4086 * The end of this block is either beyond this guy or right 4087 * at this guy. 4088 */ 4089 4090 if ((rsm->r_flags & RACK_ACKED) == 0) { 4091 rack_update_rtt(tp, rack, rsm, to, cts, SACKED); 4092 changed += (rsm->r_end - rsm->r_start); 4093 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 4094 rack_log_sack_passed(tp, rack, rsm); 4095 /* Is Reordering occuring? */ 4096 if (rsm->r_flags & RACK_SACK_PASSED) { 4097 counter_u64_add(rack_reorder_seen, 1); 4098 rack->r_ctl.rc_reorder_ts = cts; 4099 } 4100 rsm->r_flags |= RACK_ACKED; 4101 rsm->r_flags &= ~RACK_TLP; 4102 if (rsm->r_in_tmap) { 4103 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4104 rsm->r_in_tmap = 0; 4105 } 4106 } 4107 if (end == rsm->r_end) { 4108 /* This block only - done */ 4109 goto out; 4110 } 4111 /* There is more not coverend by this rsm move on */ 4112 start = rsm->r_end; 4113 nrsm = TAILQ_NEXT(rsm, r_next); 4114 rsm = nrsm; 4115 times = 0; 4116 goto do_rest_ofb; 4117 } 4118 /* Ok we need to split off this one at the tail */ 4119 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 4120 if (nrsm == NULL) { 4121 /* failed rrs what can we do but loose the sack info? */ 4122 goto out; 4123 } 4124 /* Clone it */ 4125 nrsm->r_start = end; 4126 nrsm->r_end = rsm->r_end; 4127 nrsm->r_rtr_bytes = 0; 4128 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 4129 nrsm->r_flags = rsm->r_flags; 4130 nrsm->r_sndcnt = rsm->r_sndcnt; 4131 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 4132 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 4133 } 4134 /* The sack block does not cover this guy fully */ 4135 rsm->r_flags &= (~RACK_HAS_FIN); 4136 rsm->r_end = end; 4137 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_map, rsm, nrsm, r_next); 4138 if (rsm->r_in_tmap) { 4139 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 4140 nrsm->r_in_tmap = 1; 4141 } 4142 if (rsm->r_flags & RACK_ACKED) { 4143 /* Been here done that */ 4144 goto out; 4145 } 4146 rack_update_rtt(tp, rack, rsm, to, cts, SACKED); 4147 changed += (rsm->r_end - rsm->r_start); 4148 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 4149 rack_log_sack_passed(tp, rack, rsm); 4150 /* Is Reordering occuring? */ 4151 if (rsm->r_flags & RACK_SACK_PASSED) { 4152 counter_u64_add(rack_reorder_seen, 1); 4153 rack->r_ctl.rc_reorder_ts = cts; 4154 } 4155 rsm->r_flags |= RACK_ACKED; 4156 rsm->r_flags &= ~RACK_TLP; 4157 if (rsm->r_in_tmap) { 4158 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4159 rsm->r_in_tmap = 0; 4160 } 4161 out: 4162 if (rsm && (rsm->r_flags & RACK_ACKED)) { 4163 /* 4164 * Now can we merge this newly acked 4165 * block with either the previous or 4166 * next block? 4167 */ 4168 nrsm = TAILQ_NEXT(rsm, r_next); 4169 if (nrsm && 4170 (nrsm->r_flags & RACK_ACKED)) { 4171 /* yep this and next can be merged */ 4172 rsm = rack_merge_rsm(rack, rsm, nrsm); 4173 } 4174 /* Now what about the previous? */ 4175 nrsm = TAILQ_PREV(rsm, rack_head, r_next); 4176 if (nrsm && 4177 (nrsm->r_flags & RACK_ACKED)) { 4178 /* yep the previous and this can be merged */ 4179 rsm = rack_merge_rsm(rack, nrsm, rsm); 4180 } 4181 } 4182 if (used_ref == 0) { 4183 counter_u64_add(rack_sack_proc_all, 1); 4184 } else { 4185 counter_u64_add(rack_sack_proc_short, 1); 4186 } 4187 /* Save off where we last were */ 4188 if (rsm) 4189 rack->r_ctl.rc_sacklast = TAILQ_NEXT(rsm, r_next); 4190 else 4191 rack->r_ctl.rc_sacklast = NULL; 4192 *prsm = rsm; 4193 return (changed); 4194 } 4195 4196 static void inline 4197 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 4198 { 4199 struct rack_sendmap *tmap; 4200 4201 tmap = NULL; 4202 while (rsm && (rsm->r_flags & RACK_ACKED)) { 4203 /* Its no longer sacked, mark it so */ 4204 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 4205 #ifdef INVARIANTS 4206 if (rsm->r_in_tmap) { 4207 panic("rack:%p rsm:%p flags:0x%x in tmap?", 4208 rack, rsm, rsm->r_flags); 4209 } 4210 #endif 4211 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 4212 /* Rebuild it into our tmap */ 4213 if (tmap == NULL) { 4214 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4215 tmap = rsm; 4216 } else { 4217 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 4218 tmap = rsm; 4219 } 4220 tmap->r_in_tmap = 1; 4221 rsm = TAILQ_NEXT(rsm, r_next); 4222 } 4223 /* 4224 * Now lets possibly clear the sack filter so we start 4225 * recognizing sacks that cover this area. 4226 */ 4227 if (rack_use_sack_filter) 4228 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 4229 4230 } 4231 4232 static void 4233 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th) 4234 { 4235 uint32_t changed, last_seq, entered_recovery = 0; 4236 struct tcp_rack *rack; 4237 struct rack_sendmap *rsm; 4238 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 4239 register uint32_t th_ack; 4240 int32_t i, j, k, num_sack_blks = 0; 4241 uint32_t cts, acked, ack_point, sack_changed = 0; 4242 4243 INP_WLOCK_ASSERT(tp->t_inpcb); 4244 if (th->th_flags & TH_RST) { 4245 /* We don't log resets */ 4246 return; 4247 } 4248 rack = (struct tcp_rack *)tp->t_fb_ptr; 4249 cts = tcp_ts_getticks(); 4250 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map); 4251 changed = 0; 4252 th_ack = th->th_ack; 4253 4254 if (SEQ_GT(th_ack, tp->snd_una)) { 4255 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 4256 tp->t_acktime = ticks; 4257 } 4258 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 4259 changed = th_ack - rsm->r_start; 4260 if (changed) { 4261 /* 4262 * The ACK point is advancing to th_ack, we must drop off 4263 * the packets in the rack log and calculate any eligble 4264 * RTT's. 4265 */ 4266 rack->r_wanted_output++; 4267 more: 4268 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map); 4269 if (rsm == NULL) { 4270 if ((th_ack - 1) == tp->iss) { 4271 /* 4272 * For the SYN incoming case we will not 4273 * have called tcp_output for the sending of 4274 * the SYN, so there will be no map. All 4275 * other cases should probably be a panic. 4276 */ 4277 goto proc_sack; 4278 } 4279 if (tp->t_flags & TF_SENTFIN) { 4280 /* if we send a FIN we will not hav a map */ 4281 goto proc_sack; 4282 } 4283 #ifdef INVARIANTS 4284 panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n", 4285 tp, 4286 th, tp->t_state, rack, 4287 tp->snd_una, tp->snd_max, tp->snd_nxt, changed); 4288 #endif 4289 goto proc_sack; 4290 } 4291 if (SEQ_LT(th_ack, rsm->r_start)) { 4292 /* Huh map is missing this */ 4293 #ifdef INVARIANTS 4294 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 4295 rsm->r_start, 4296 th_ack, tp->t_state, rack->r_state); 4297 #endif 4298 goto proc_sack; 4299 } 4300 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED); 4301 /* Now do we consume the whole thing? */ 4302 if (SEQ_GEQ(th_ack, rsm->r_end)) { 4303 /* Its all consumed. */ 4304 uint32_t left; 4305 4306 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 4307 rsm->r_rtr_bytes = 0; 4308 TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next); 4309 if (rsm->r_in_tmap) { 4310 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4311 rsm->r_in_tmap = 0; 4312 } 4313 if (rack->r_ctl.rc_next == rsm) { 4314 /* scoot along the marker */ 4315 rack->r_ctl.rc_next = TAILQ_FIRST(&rack->r_ctl.rc_map); 4316 } 4317 if (rsm->r_flags & RACK_ACKED) { 4318 /* 4319 * It was acked on the scoreboard -- remove 4320 * it from total 4321 */ 4322 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 4323 } else if (rsm->r_flags & RACK_SACK_PASSED) { 4324 /* 4325 * There are acked segments ACKED on the 4326 * scoreboard further up. We are seeing 4327 * reordering. 4328 */ 4329 counter_u64_add(rack_reorder_seen, 1); 4330 rsm->r_flags |= RACK_ACKED; 4331 rack->r_ctl.rc_reorder_ts = cts; 4332 } 4333 left = th_ack - rsm->r_end; 4334 if (rsm->r_rtr_cnt > 1) { 4335 /* 4336 * Technically we should make r_rtr_cnt be 4337 * monotonicly increasing and just mod it to 4338 * the timestamp it is replacing.. that way 4339 * we would have the last 3 retransmits. Now 4340 * rc_loss_count will be wrong if we 4341 * retransmit something more than 2 times in 4342 * recovery :( 4343 */ 4344 rack->r_ctl.rc_loss_count += (rsm->r_rtr_cnt - 1); 4345 } 4346 /* Free back to zone */ 4347 rack_free(rack, rsm); 4348 if (left) { 4349 goto more; 4350 } 4351 goto proc_sack; 4352 } 4353 if (rsm->r_flags & RACK_ACKED) { 4354 /* 4355 * It was acked on the scoreboard -- remove it from 4356 * total for the part being cum-acked. 4357 */ 4358 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 4359 } 4360 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 4361 rsm->r_rtr_bytes = 0; 4362 rsm->r_start = th_ack; 4363 } 4364 proc_sack: 4365 /* Check for reneging */ 4366 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map); 4367 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 4368 /* 4369 * The peer has moved snd_una up to 4370 * the edge of this send, i.e. one 4371 * that it had previously acked. The only 4372 * way that can be true if the peer threw 4373 * away data (space issues) that it had 4374 * previously sacked (else it would have 4375 * given us snd_una up to (rsm->r_end). 4376 * We need to undo the acked markings here. 4377 * 4378 * Note we have to look to make sure th_ack is 4379 * our rsm->r_start in case we get an old ack 4380 * where th_ack is behind snd_una. 4381 */ 4382 rack_peer_reneges(rack, rsm, th->th_ack); 4383 } 4384 if ((to->to_flags & TOF_SACK) == 0) { 4385 /* We are done nothing left to log */ 4386 goto out; 4387 } 4388 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_map, rack_sendmap, r_next); 4389 if (rsm) { 4390 last_seq = rsm->r_end; 4391 } else { 4392 last_seq = tp->snd_max; 4393 } 4394 /* Sack block processing */ 4395 if (SEQ_GT(th_ack, tp->snd_una)) 4396 ack_point = th_ack; 4397 else 4398 ack_point = tp->snd_una; 4399 for (i = 0; i < to->to_nsacks; i++) { 4400 bcopy((to->to_sacks + i * TCPOLEN_SACK), 4401 &sack, sizeof(sack)); 4402 sack.start = ntohl(sack.start); 4403 sack.end = ntohl(sack.end); 4404 if (SEQ_GT(sack.end, sack.start) && 4405 SEQ_GT(sack.start, ack_point) && 4406 SEQ_LT(sack.start, tp->snd_max) && 4407 SEQ_GT(sack.end, ack_point) && 4408 SEQ_LEQ(sack.end, tp->snd_max)) { 4409 if ((rack->r_ctl.rc_num_maps_alloced > rack_sack_block_limit) && 4410 (SEQ_LT(sack.end, last_seq)) && 4411 ((sack.end - sack.start) < (tp->t_maxseg / 8))) { 4412 /* 4413 * Not the last piece and its smaller than 4414 * 1/8th of a MSS. We ignore this. 4415 */ 4416 counter_u64_add(rack_runt_sacks, 1); 4417 continue; 4418 } 4419 sack_blocks[num_sack_blks] = sack; 4420 num_sack_blks++; 4421 } else if (SEQ_LEQ(sack.start, th_ack) && 4422 SEQ_LEQ(sack.end, th_ack)) { 4423 /* 4424 * Its a D-SACK block. 4425 */ 4426 /* tcp_record_dsack(sack.start, sack.end); */ 4427 } 4428 } 4429 if (num_sack_blks == 0) 4430 goto out; 4431 /* 4432 * Sort the SACK blocks so we can update the rack scoreboard with 4433 * just one pass. 4434 */ 4435 if (rack_use_sack_filter) { 4436 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 4437 num_sack_blks, th->th_ack); 4438 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 4439 } 4440 if (num_sack_blks < 2) { 4441 goto do_sack_work; 4442 } 4443 /* Sort the sacks */ 4444 for (i = 0; i < num_sack_blks; i++) { 4445 for (j = i + 1; j < num_sack_blks; j++) { 4446 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 4447 sack = sack_blocks[i]; 4448 sack_blocks[i] = sack_blocks[j]; 4449 sack_blocks[j] = sack; 4450 } 4451 } 4452 } 4453 /* 4454 * Now are any of the sack block ends the same (yes some 4455 * implememtations send these)? 4456 */ 4457 again: 4458 if (num_sack_blks > 1) { 4459 for (i = 0; i < num_sack_blks; i++) { 4460 for (j = i + 1; j < num_sack_blks; j++) { 4461 if (sack_blocks[i].end == sack_blocks[j].end) { 4462 /* 4463 * Ok these two have the same end we 4464 * want the smallest end and then 4465 * throw away the larger and start 4466 * again. 4467 */ 4468 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 4469 /* 4470 * The second block covers 4471 * more area use that 4472 */ 4473 sack_blocks[i].start = sack_blocks[j].start; 4474 } 4475 /* 4476 * Now collapse out the dup-sack and 4477 * lower the count 4478 */ 4479 for (k = (j + 1); k < num_sack_blks; k++) { 4480 sack_blocks[j].start = sack_blocks[k].start; 4481 sack_blocks[j].end = sack_blocks[k].end; 4482 j++; 4483 } 4484 num_sack_blks--; 4485 goto again; 4486 } 4487 } 4488 } 4489 } 4490 do_sack_work: 4491 rsm = rack->r_ctl.rc_sacklast; 4492 for (i = 0; i < num_sack_blks; i++) { 4493 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts); 4494 if (acked) { 4495 rack->r_wanted_output++; 4496 changed += acked; 4497 sack_changed += acked; 4498 } 4499 } 4500 out: 4501 if (changed) { 4502 /* Something changed cancel the rack timer */ 4503 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 4504 } 4505 if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) { 4506 /* 4507 * Ok we have a high probability that we need to go in to 4508 * recovery since we have data sack'd 4509 */ 4510 struct rack_sendmap *rsm; 4511 uint32_t tsused; 4512 4513 tsused = tcp_ts_getticks(); 4514 rsm = tcp_rack_output(tp, rack, tsused); 4515 if (rsm) { 4516 /* Enter recovery */ 4517 rack->r_ctl.rc_rsm_start = rsm->r_start; 4518 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 4519 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 4520 entered_recovery = 1; 4521 rack_cong_signal(tp, NULL, CC_NDUPACK); 4522 /* 4523 * When we enter recovery we need to assure we send 4524 * one packet. 4525 */ 4526 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg; 4527 rack->r_timer_override = 1; 4528 } 4529 } 4530 if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) { 4531 /* Deal with changed an PRR here (in recovery only) */ 4532 uint32_t pipe, snd_una; 4533 4534 rack->r_ctl.rc_prr_delivered += changed; 4535 /* Compute prr_sndcnt */ 4536 if (SEQ_GT(tp->snd_una, th_ack)) { 4537 snd_una = tp->snd_una; 4538 } else { 4539 snd_una = th_ack; 4540 } 4541 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 4542 if (pipe > tp->snd_ssthresh) { 4543 long sndcnt; 4544 4545 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 4546 if (rack->r_ctl.rc_prr_recovery_fs > 0) 4547 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 4548 else { 4549 rack->r_ctl.rc_prr_sndcnt = 0; 4550 sndcnt = 0; 4551 } 4552 sndcnt++; 4553 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 4554 sndcnt -= rack->r_ctl.rc_prr_out; 4555 else 4556 sndcnt = 0; 4557 rack->r_ctl.rc_prr_sndcnt = sndcnt; 4558 } else { 4559 uint32_t limit; 4560 4561 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 4562 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 4563 else 4564 limit = 0; 4565 if (changed > limit) 4566 limit = changed; 4567 limit += tp->t_maxseg; 4568 if (tp->snd_ssthresh > pipe) { 4569 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 4570 } else { 4571 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 4572 } 4573 } 4574 if (rack->r_ctl.rc_prr_sndcnt >= tp->t_maxseg) { 4575 rack->r_timer_override = 1; 4576 } 4577 } 4578 } 4579 4580 /* 4581 * Return value of 1, we do not need to call rack_process_data(). 4582 * return value of 0, rack_process_data can be called. 4583 * For ret_val if its 0 the TCP is locked, if its non-zero 4584 * its unlocked and probably unsafe to touch the TCB. 4585 */ 4586 static int 4587 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 4588 struct tcpcb *tp, struct tcpopt *to, 4589 uint32_t tiwin, int32_t tlen, 4590 int32_t * ofia, int32_t thflags, int32_t * ret_val) 4591 { 4592 int32_t ourfinisacked = 0; 4593 int32_t nsegs, acked_amount; 4594 int32_t acked; 4595 struct mbuf *mfree; 4596 struct tcp_rack *rack; 4597 int32_t recovery = 0; 4598 4599 rack = (struct tcp_rack *)tp->t_fb_ptr; 4600 if (SEQ_GT(th->th_ack, tp->snd_max)) { 4601 rack_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 4602 return (1); 4603 } 4604 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 4605 rack_log_ack(tp, to, th); 4606 } 4607 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 4608 /* 4609 * Old ack, behind (or duplicate to) the last one rcv'd 4610 * Note: Should mark reordering is occuring! We should also 4611 * look for sack blocks arriving e.g. ack 1, 4-4 then ack 1, 4612 * 3-3, 4-4 would be reording. As well as ack 1, 3-3 <no 4613 * retran and> ack 3 4614 */ 4615 return (0); 4616 } 4617 /* 4618 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 4619 * something we sent. 4620 */ 4621 if (tp->t_flags & TF_NEEDSYN) { 4622 /* 4623 * T/TCP: Connection was half-synchronized, and our SYN has 4624 * been ACK'd (so connection is now fully synchronized). Go 4625 * to non-starred state, increment snd_una for ACK of SYN, 4626 * and check if we can do window scaling. 4627 */ 4628 tp->t_flags &= ~TF_NEEDSYN; 4629 tp->snd_una++; 4630 /* Do window scaling? */ 4631 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 4632 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 4633 tp->rcv_scale = tp->request_r_scale; 4634 /* Send window already scaled. */ 4635 } 4636 } 4637 nsegs = max(1, m->m_pkthdr.lro_nsegs); 4638 INP_WLOCK_ASSERT(tp->t_inpcb); 4639 4640 acked = BYTES_THIS_ACK(tp, th); 4641 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 4642 TCPSTAT_ADD(tcps_rcvackbyte, acked); 4643 4644 /* 4645 * If we just performed our first retransmit, and the ACK arrives 4646 * within our recovery window, then it was a mistake to do the 4647 * retransmit in the first place. Recover our original cwnd and 4648 * ssthresh, and proceed to transmit where we left off. 4649 */ 4650 if (tp->t_flags & TF_PREVVALID) { 4651 tp->t_flags &= ~TF_PREVVALID; 4652 if (tp->t_rxtshift == 1 && 4653 (int)(ticks - tp->t_badrxtwin) < 0) 4654 rack_cong_signal(tp, th, CC_RTO_ERR); 4655 } 4656 /* 4657 * If we have a timestamp reply, update smoothed round trip time. If 4658 * no timestamp is present but transmit timer is running and timed 4659 * sequence number was acked, update smoothed round trip time. Since 4660 * we now have an rtt measurement, cancel the timer backoff (cf., 4661 * Phil Karn's retransmit alg.). Recompute the initial retransmit 4662 * timer. 4663 * 4664 * Some boxes send broken timestamp replies during the SYN+ACK 4665 * phase, ignore timestamps of 0 or we could calculate a huge RTT 4666 * and blow up the retransmit timer. 4667 */ 4668 /* 4669 * If all outstanding data is acked, stop retransmit timer and 4670 * remember to restart (more output or persist). If there is more 4671 * data to be acked, restart retransmit timer, using current 4672 * (possibly backed-off) value. 4673 */ 4674 if (th->th_ack == tp->snd_max) { 4675 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 4676 rack->r_wanted_output++; 4677 } 4678 /* 4679 * If no data (only SYN) was ACK'd, skip rest of ACK processing. 4680 */ 4681 if (acked == 0) { 4682 if (ofia) 4683 *ofia = ourfinisacked; 4684 return (0); 4685 } 4686 if (rack->r_ctl.rc_early_recovery) { 4687 if (IN_RECOVERY(tp->t_flags)) { 4688 if (SEQ_LT(th->th_ack, tp->snd_recover) && 4689 (SEQ_LT(th->th_ack, tp->snd_max))) { 4690 tcp_rack_partialack(tp, th); 4691 } else { 4692 rack_post_recovery(tp, th); 4693 recovery = 1; 4694 } 4695 } 4696 } 4697 /* 4698 * Let the congestion control algorithm update congestion control 4699 * related information. This typically means increasing the 4700 * congestion window. 4701 */ 4702 rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery); 4703 SOCKBUF_LOCK(&so->so_snd); 4704 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 4705 tp->snd_wnd -= acked_amount; 4706 mfree = sbcut_locked(&so->so_snd, acked_amount); 4707 if ((sbused(&so->so_snd) == 0) && 4708 (acked > acked_amount) && 4709 (tp->t_state >= TCPS_FIN_WAIT_1)) { 4710 ourfinisacked = 1; 4711 } 4712 /* NB: sowwakeup_locked() does an implicit unlock. */ 4713 sowwakeup_locked(so); 4714 m_freem(mfree); 4715 if (rack->r_ctl.rc_early_recovery == 0) { 4716 if (IN_RECOVERY(tp->t_flags)) { 4717 if (SEQ_LT(th->th_ack, tp->snd_recover) && 4718 (SEQ_LT(th->th_ack, tp->snd_max))) { 4719 tcp_rack_partialack(tp, th); 4720 } else { 4721 rack_post_recovery(tp, th); 4722 } 4723 } 4724 } 4725 tp->snd_una = th->th_ack; 4726 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 4727 tp->snd_recover = tp->snd_una; 4728 4729 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 4730 tp->snd_nxt = tp->snd_una; 4731 } 4732 if (tp->snd_una == tp->snd_max) { 4733 /* Nothing left outstanding */ 4734 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 4735 tp->t_acktime = 0; 4736 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 4737 /* Set need output so persist might get set */ 4738 rack->r_wanted_output++; 4739 if (rack_use_sack_filter) 4740 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 4741 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 4742 (sbavail(&so->so_snd) == 0) && 4743 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 4744 /* 4745 * The socket was gone and the 4746 * peer sent data, time to 4747 * reset him. 4748 */ 4749 *ret_val = 1; 4750 tp = tcp_close(tp); 4751 rack_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 4752 return (1); 4753 } 4754 } 4755 if (ofia) 4756 *ofia = ourfinisacked; 4757 return (0); 4758 } 4759 4760 4761 /* 4762 * Return value of 1, the TCB is unlocked and most 4763 * likely gone, return value of 0, the TCP is still 4764 * locked. 4765 */ 4766 static int 4767 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 4768 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 4769 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 4770 { 4771 /* 4772 * Update window information. Don't look at window if no ACK: TAC's 4773 * send garbage on first SYN. 4774 */ 4775 int32_t nsegs; 4776 #ifdef TCP_RFC7413 4777 int32_t tfo_syn; 4778 #else 4779 #define tfo_syn (FALSE) 4780 #endif 4781 struct tcp_rack *rack; 4782 4783 rack = (struct tcp_rack *)tp->t_fb_ptr; 4784 INP_WLOCK_ASSERT(tp->t_inpcb); 4785 nsegs = max(1, m->m_pkthdr.lro_nsegs); 4786 if ((thflags & TH_ACK) && 4787 (SEQ_LT(tp->snd_wl1, th->th_seq) || 4788 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 4789 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 4790 /* keep track of pure window updates */ 4791 if (tlen == 0 && 4792 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 4793 TCPSTAT_INC(tcps_rcvwinupd); 4794 tp->snd_wnd = tiwin; 4795 tp->snd_wl1 = th->th_seq; 4796 tp->snd_wl2 = th->th_ack; 4797 if (tp->snd_wnd > tp->max_sndwnd) 4798 tp->max_sndwnd = tp->snd_wnd; 4799 rack->r_wanted_output++; 4800 } else if (thflags & TH_ACK) { 4801 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 4802 tp->snd_wnd = tiwin; 4803 tp->snd_wl1 = th->th_seq; 4804 tp->snd_wl2 = th->th_ack; 4805 } 4806 } 4807 /* Was persist timer active and now we have window space? */ 4808 if ((rack->rc_in_persist != 0) && tp->snd_wnd) { 4809 rack_exit_persist(tp, rack); 4810 tp->snd_nxt = tp->snd_max; 4811 /* Make sure we output to start the timer */ 4812 rack->r_wanted_output++; 4813 } 4814 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 4815 m_freem(m); 4816 return (0); 4817 } 4818 /* 4819 * Process segments with URG. 4820 */ 4821 if ((thflags & TH_URG) && th->th_urp && 4822 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 4823 /* 4824 * This is a kludge, but if we receive and accept random 4825 * urgent pointers, we'll crash in soreceive. It's hard to 4826 * imagine someone actually wanting to send this much urgent 4827 * data. 4828 */ 4829 SOCKBUF_LOCK(&so->so_rcv); 4830 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 4831 th->th_urp = 0; /* XXX */ 4832 thflags &= ~TH_URG; /* XXX */ 4833 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 4834 goto dodata; /* XXX */ 4835 } 4836 /* 4837 * If this segment advances the known urgent pointer, then 4838 * mark the data stream. This should not happen in 4839 * CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since a 4840 * FIN has been received from the remote side. In these 4841 * states we ignore the URG. 4842 * 4843 * According to RFC961 (Assigned Protocols), the urgent 4844 * pointer points to the last octet of urgent data. We 4845 * continue, however, to consider it to indicate the first 4846 * octet of data past the urgent section as the original 4847 * spec states (in one of two places). 4848 */ 4849 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { 4850 tp->rcv_up = th->th_seq + th->th_urp; 4851 so->so_oobmark = sbavail(&so->so_rcv) + 4852 (tp->rcv_up - tp->rcv_nxt) - 1; 4853 if (so->so_oobmark == 0) 4854 so->so_rcv.sb_state |= SBS_RCVATMARK; 4855 sohasoutofband(so); 4856 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 4857 } 4858 SOCKBUF_UNLOCK(&so->so_rcv); 4859 /* 4860 * Remove out of band data so doesn't get presented to user. 4861 * This can happen independent of advancing the URG pointer, 4862 * but if two URG's are pending at once, some out-of-band 4863 * data may creep in... ick. 4864 */ 4865 if (th->th_urp <= (uint32_t) tlen && 4866 !(so->so_options & SO_OOBINLINE)) { 4867 /* hdr drop is delayed */ 4868 tcp_pulloutofband(so, th, m, drop_hdrlen); 4869 } 4870 } else { 4871 /* 4872 * If no out of band data is expected, pull receive urgent 4873 * pointer along with the receive window. 4874 */ 4875 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 4876 tp->rcv_up = tp->rcv_nxt; 4877 } 4878 dodata: /* XXX */ 4879 INP_WLOCK_ASSERT(tp->t_inpcb); 4880 4881 /* 4882 * Process the segment text, merging it into the TCP sequencing 4883 * queue, and arranging for acknowledgment of receipt if necessary. 4884 * This process logically involves adjusting tp->rcv_wnd as data is 4885 * presented to the user (this happens in tcp_usrreq.c, case 4886 * PRU_RCVD). If a FIN has already been received on this connection 4887 * then we just ignore the text. 4888 */ 4889 #ifdef TCP_RFC7413 4890 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 4891 (tp->t_flags & TF_FASTOPEN)); 4892 #endif 4893 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 4894 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 4895 tcp_seq save_start = th->th_seq; 4896 tcp_seq save_rnxt = tp->rcv_nxt; 4897 int save_tlen = tlen; 4898 4899 m_adj(m, drop_hdrlen); /* delayed header drop */ 4900 /* 4901 * Insert segment which includes th into TCP reassembly 4902 * queue with control block tp. Set thflags to whether 4903 * reassembly now includes a segment with FIN. This handles 4904 * the common case inline (segment is the next to be 4905 * received on an established connection, and the queue is 4906 * empty), avoiding linkage into and removal from the queue 4907 * and repetition of various conversions. Set DELACK for 4908 * segments received in order, but ack immediately when 4909 * segments are out of order (so fast retransmit can work). 4910 */ 4911 if (th->th_seq == tp->rcv_nxt && 4912 SEGQ_EMPTY(tp) && 4913 (TCPS_HAVEESTABLISHED(tp->t_state) || 4914 tfo_syn)) { 4915 if (DELAY_ACK(tp, tlen) || tfo_syn) { 4916 rack_timer_cancel(tp, rack, 4917 rack->r_ctl.rc_rcvtime, __LINE__); 4918 tp->t_flags |= TF_DELACK; 4919 } else { 4920 rack->r_wanted_output++; 4921 tp->t_flags |= TF_ACKNOW; 4922 } 4923 tp->rcv_nxt += tlen; 4924 thflags = th->th_flags & TH_FIN; 4925 TCPSTAT_ADD(tcps_rcvpack, nsegs); 4926 TCPSTAT_ADD(tcps_rcvbyte, tlen); 4927 SOCKBUF_LOCK(&so->so_rcv); 4928 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 4929 m_freem(m); 4930 else 4931 sbappendstream_locked(&so->so_rcv, m, 0); 4932 /* NB: sorwakeup_locked() does an implicit unlock. */ 4933 sorwakeup_locked(so); 4934 } else { 4935 /* 4936 * XXX: Due to the header drop above "th" is 4937 * theoretically invalid by now. Fortunately 4938 * m_adj() doesn't actually frees any mbufs when 4939 * trimming from the head. 4940 */ 4941 tcp_seq temp = save_start; 4942 thflags = tcp_reass(tp, th, &temp, &tlen, m); 4943 tp->t_flags |= TF_ACKNOW; 4944 } 4945 if (((tlen == 0) && (save_tlen > 0) && 4946 (SEQ_LT(save_start, save_rnxt)))) { 4947 /* 4948 * DSACK actually handled in the fastpath 4949 * above. 4950 */ 4951 tcp_update_sack_list(tp, save_start, 4952 save_start + save_tlen); 4953 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 4954 /* 4955 * Cleaning sackblks by using zero length 4956 * update. 4957 */ 4958 if ((tp->rcv_numsacks >= 1) && 4959 (tp->sackblks[0].end == save_start)) { 4960 /* partial overlap, recorded at todrop above */ 4961 tcp_update_sack_list(tp, tp->sackblks[0].start, 4962 tp->sackblks[0].end); 4963 } else { 4964 tcp_update_dsack_list(tp, save_start, 4965 save_start + save_tlen); 4966 } 4967 } else if ((tlen > 0) && (tlen >= save_tlen)) { 4968 /* Update of sackblks. */ 4969 tcp_update_dsack_list(tp, save_start, 4970 save_start + save_tlen); 4971 } else if (tlen > 0) { 4972 tcp_update_dsack_list(tp, save_start, 4973 save_start + tlen); 4974 } 4975 } else { 4976 m_freem(m); 4977 thflags &= ~TH_FIN; 4978 } 4979 4980 /* 4981 * If FIN is received ACK the FIN and let the user know that the 4982 * connection is closing. 4983 */ 4984 if (thflags & TH_FIN) { 4985 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 4986 socantrcvmore(so); 4987 /* 4988 * If connection is half-synchronized (ie NEEDSYN 4989 * flag on) then delay ACK, so it may be piggybacked 4990 * when SYN is sent. Otherwise, since we received a 4991 * FIN then no more input can be expected, send ACK 4992 * now. 4993 */ 4994 if (tp->t_flags & TF_NEEDSYN) { 4995 rack_timer_cancel(tp, rack, 4996 rack->r_ctl.rc_rcvtime, __LINE__); 4997 tp->t_flags |= TF_DELACK; 4998 } else { 4999 tp->t_flags |= TF_ACKNOW; 5000 } 5001 tp->rcv_nxt++; 5002 } 5003 switch (tp->t_state) { 5004 5005 /* 5006 * In SYN_RECEIVED and ESTABLISHED STATES enter the 5007 * CLOSE_WAIT state. 5008 */ 5009 case TCPS_SYN_RECEIVED: 5010 tp->t_starttime = ticks; 5011 /* FALLTHROUGH */ 5012 case TCPS_ESTABLISHED: 5013 rack_timer_cancel(tp, rack, 5014 rack->r_ctl.rc_rcvtime, __LINE__); 5015 tcp_state_change(tp, TCPS_CLOSE_WAIT); 5016 break; 5017 5018 /* 5019 * If still in FIN_WAIT_1 STATE FIN has not been 5020 * acked so enter the CLOSING state. 5021 */ 5022 case TCPS_FIN_WAIT_1: 5023 rack_timer_cancel(tp, rack, 5024 rack->r_ctl.rc_rcvtime, __LINE__); 5025 tcp_state_change(tp, TCPS_CLOSING); 5026 break; 5027 5028 /* 5029 * In FIN_WAIT_2 state enter the TIME_WAIT state, 5030 * starting the time-wait timer, turning off the 5031 * other standard timers. 5032 */ 5033 case TCPS_FIN_WAIT_2: 5034 rack_timer_cancel(tp, rack, 5035 rack->r_ctl.rc_rcvtime, __LINE__); 5036 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 5037 tcp_twstart(tp); 5038 return (1); 5039 } 5040 } 5041 /* 5042 * Return any desired output. 5043 */ 5044 if ((tp->t_flags & TF_ACKNOW) || 5045 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 5046 rack->r_wanted_output++; 5047 } 5048 INP_WLOCK_ASSERT(tp->t_inpcb); 5049 return (0); 5050 } 5051 5052 /* 5053 * Here nothing is really faster, its just that we 5054 * have broken out the fast-data path also just like 5055 * the fast-ack. 5056 */ 5057 static int 5058 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 5059 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5060 uint32_t tiwin, int32_t nxt_pkt) 5061 { 5062 int32_t nsegs; 5063 int32_t newsize = 0; /* automatic sockbuf scaling */ 5064 struct tcp_rack *rack; 5065 #ifdef TCPDEBUG 5066 /* 5067 * The size of tcp_saveipgen must be the size of the max ip header, 5068 * now IPv6. 5069 */ 5070 u_char tcp_saveipgen[IP6_HDR_LEN]; 5071 struct tcphdr tcp_savetcp; 5072 short ostate = 0; 5073 5074 #endif 5075 /* 5076 * If last ACK falls within this segment's sequence numbers, record 5077 * the timestamp. NOTE that the test is modified according to the 5078 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 5079 */ 5080 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 5081 return (0); 5082 } 5083 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 5084 return (0); 5085 } 5086 if (tiwin && tiwin != tp->snd_wnd) { 5087 return (0); 5088 } 5089 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 5090 return (0); 5091 } 5092 if (__predict_false((to->to_flags & TOF_TS) && 5093 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 5094 return (0); 5095 } 5096 if (__predict_false((th->th_ack != tp->snd_una))) { 5097 return (0); 5098 } 5099 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 5100 return (0); 5101 } 5102 if ((to->to_flags & TOF_TS) != 0 && 5103 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 5104 tp->ts_recent_age = tcp_ts_getticks(); 5105 tp->ts_recent = to->to_tsval; 5106 } 5107 rack = (struct tcp_rack *)tp->t_fb_ptr; 5108 /* 5109 * This is a pure, in-sequence data packet with nothing on the 5110 * reassembly queue and we have enough buffer space to take it. 5111 */ 5112 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5113 5114 5115 /* Clean receiver SACK report if present */ 5116 if (tp->rcv_numsacks) 5117 tcp_clean_sackreport(tp); 5118 TCPSTAT_INC(tcps_preddat); 5119 tp->rcv_nxt += tlen; 5120 /* 5121 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 5122 */ 5123 tp->snd_wl1 = th->th_seq; 5124 /* 5125 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 5126 */ 5127 tp->rcv_up = tp->rcv_nxt; 5128 TCPSTAT_ADD(tcps_rcvpack, nsegs); 5129 TCPSTAT_ADD(tcps_rcvbyte, tlen); 5130 #ifdef TCPDEBUG 5131 if (so->so_options & SO_DEBUG) 5132 tcp_trace(TA_INPUT, ostate, tp, 5133 (void *)tcp_saveipgen, &tcp_savetcp, 0); 5134 #endif 5135 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 5136 5137 /* Add data to socket buffer. */ 5138 SOCKBUF_LOCK(&so->so_rcv); 5139 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5140 m_freem(m); 5141 } else { 5142 /* 5143 * Set new socket buffer size. Give up when limit is 5144 * reached. 5145 */ 5146 if (newsize) 5147 if (!sbreserve_locked(&so->so_rcv, 5148 newsize, so, NULL)) 5149 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 5150 m_adj(m, drop_hdrlen); /* delayed header drop */ 5151 sbappendstream_locked(&so->so_rcv, m, 0); 5152 rack_calc_rwin(so, tp); 5153 } 5154 /* NB: sorwakeup_locked() does an implicit unlock. */ 5155 sorwakeup_locked(so); 5156 if (DELAY_ACK(tp, tlen)) { 5157 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5158 tp->t_flags |= TF_DELACK; 5159 } else { 5160 tp->t_flags |= TF_ACKNOW; 5161 rack->r_wanted_output++; 5162 } 5163 if ((tp->snd_una == tp->snd_max) && rack_use_sack_filter) 5164 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 5165 return (1); 5166 } 5167 5168 /* 5169 * This subfunction is used to try to highly optimize the 5170 * fast path. We again allow window updates that are 5171 * in sequence to remain in the fast-path. We also add 5172 * in the __predict's to attempt to help the compiler. 5173 * Note that if we return a 0, then we can *not* process 5174 * it and the caller should push the packet into the 5175 * slow-path. 5176 */ 5177 static int 5178 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 5179 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5180 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 5181 { 5182 int32_t acked; 5183 int32_t nsegs; 5184 5185 #ifdef TCPDEBUG 5186 /* 5187 * The size of tcp_saveipgen must be the size of the max ip header, 5188 * now IPv6. 5189 */ 5190 u_char tcp_saveipgen[IP6_HDR_LEN]; 5191 struct tcphdr tcp_savetcp; 5192 short ostate = 0; 5193 5194 #endif 5195 struct tcp_rack *rack; 5196 5197 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 5198 /* Old ack, behind (or duplicate to) the last one rcv'd */ 5199 return (0); 5200 } 5201 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 5202 /* Above what we have sent? */ 5203 return (0); 5204 } 5205 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 5206 /* We are retransmitting */ 5207 return (0); 5208 } 5209 if (__predict_false(tiwin == 0)) { 5210 /* zero window */ 5211 return (0); 5212 } 5213 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 5214 /* We need a SYN or a FIN, unlikely.. */ 5215 return (0); 5216 } 5217 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 5218 /* Timestamp is behind .. old ack with seq wrap? */ 5219 return (0); 5220 } 5221 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 5222 /* Still recovering */ 5223 return (0); 5224 } 5225 rack = (struct tcp_rack *)tp->t_fb_ptr; 5226 if (rack->r_ctl.rc_sacked) { 5227 /* We have sack holes on our scoreboard */ 5228 return (0); 5229 } 5230 /* Ok if we reach here, we can process a fast-ack */ 5231 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5232 rack_log_ack(tp, to, th); 5233 /* Did the window get updated? */ 5234 if (tiwin != tp->snd_wnd) { 5235 tp->snd_wnd = tiwin; 5236 tp->snd_wl1 = th->th_seq; 5237 if (tp->snd_wnd > tp->max_sndwnd) 5238 tp->max_sndwnd = tp->snd_wnd; 5239 } 5240 if ((rack->rc_in_persist != 0) && (tp->snd_wnd >= tp->t_maxseg)) { 5241 rack_exit_persist(tp, rack); 5242 } 5243 /* 5244 * If last ACK falls within this segment's sequence numbers, record 5245 * the timestamp. NOTE that the test is modified according to the 5246 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 5247 */ 5248 if ((to->to_flags & TOF_TS) != 0 && 5249 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 5250 tp->ts_recent_age = tcp_ts_getticks(); 5251 tp->ts_recent = to->to_tsval; 5252 } 5253 /* 5254 * This is a pure ack for outstanding data. 5255 */ 5256 TCPSTAT_INC(tcps_predack); 5257 5258 /* 5259 * "bad retransmit" recovery. 5260 */ 5261 if (tp->t_flags & TF_PREVVALID) { 5262 tp->t_flags &= ~TF_PREVVALID; 5263 if (tp->t_rxtshift == 1 && 5264 (int)(ticks - tp->t_badrxtwin) < 0) 5265 rack_cong_signal(tp, th, CC_RTO_ERR); 5266 } 5267 /* 5268 * Recalculate the transmit timer / rtt. 5269 * 5270 * Some boxes send broken timestamp replies during the SYN+ACK 5271 * phase, ignore timestamps of 0 or we could calculate a huge RTT 5272 * and blow up the retransmit timer. 5273 */ 5274 acked = BYTES_THIS_ACK(tp, th); 5275 5276 #ifdef TCP_HHOOK 5277 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 5278 hhook_run_tcp_est_in(tp, th, to); 5279 #endif 5280 5281 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 5282 TCPSTAT_ADD(tcps_rcvackbyte, acked); 5283 sbdrop(&so->so_snd, acked); 5284 /* 5285 * Let the congestion control algorithm update congestion control 5286 * related information. This typically means increasing the 5287 * congestion window. 5288 */ 5289 rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0); 5290 5291 tp->snd_una = th->th_ack; 5292 /* 5293 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 5294 */ 5295 tp->snd_wl2 = th->th_ack; 5296 tp->t_dupacks = 0; 5297 m_freem(m); 5298 /* ND6_HINT(tp); *//* Some progress has been made. */ 5299 5300 /* 5301 * If all outstanding data are acked, stop retransmit timer, 5302 * otherwise restart timer using current (possibly backed-off) 5303 * value. If process is waiting for space, wakeup/selwakeup/signal. 5304 * If data are ready to send, let tcp_output decide between more 5305 * output or persist. 5306 */ 5307 #ifdef TCPDEBUG 5308 if (so->so_options & SO_DEBUG) 5309 tcp_trace(TA_INPUT, ostate, tp, 5310 (void *)tcp_saveipgen, 5311 &tcp_savetcp, 0); 5312 #endif 5313 if (tp->snd_una == tp->snd_max) { 5314 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 5315 tp->t_acktime = 0; 5316 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5317 } 5318 /* Wake up the socket if we have room to write more */ 5319 sowwakeup(so); 5320 if (sbavail(&so->so_snd)) { 5321 rack->r_wanted_output++; 5322 } 5323 return (1); 5324 } 5325 5326 /* 5327 * Return value of 1, the TCB is unlocked and most 5328 * likely gone, return value of 0, the TCP is still 5329 * locked. 5330 */ 5331 static int 5332 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 5333 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5334 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 5335 { 5336 int32_t ret_val = 0; 5337 int32_t todrop; 5338 int32_t ourfinisacked = 0; 5339 5340 rack_calc_rwin(so, tp); 5341 /* 5342 * If the state is SYN_SENT: if seg contains an ACK, but not for our 5343 * SYN, drop the input. if seg contains a RST, then drop the 5344 * connection. if seg does not contain SYN, then drop it. Otherwise 5345 * this is an acceptable SYN segment initialize tp->rcv_nxt and 5346 * tp->irs if seg contains ack then advance tp->snd_una if seg 5347 * contains an ECE and ECN support is enabled, the stream is ECN 5348 * capable. if SYN has been acked change to ESTABLISHED else 5349 * SYN_RCVD state arrange for segment to be acked (eventually) 5350 * continue processing rest of data/controls, beginning with URG 5351 */ 5352 if ((thflags & TH_ACK) && 5353 (SEQ_LEQ(th->th_ack, tp->iss) || 5354 SEQ_GT(th->th_ack, tp->snd_max))) { 5355 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 5356 return (1); 5357 } 5358 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 5359 TCP_PROBE5(connect__refused, NULL, tp, 5360 mtod(m, const char *), tp, th); 5361 tp = tcp_drop(tp, ECONNREFUSED); 5362 rack_do_drop(m, tp); 5363 return (1); 5364 } 5365 if (thflags & TH_RST) { 5366 rack_do_drop(m, tp); 5367 return (1); 5368 } 5369 if (!(thflags & TH_SYN)) { 5370 rack_do_drop(m, tp); 5371 return (1); 5372 } 5373 tp->irs = th->th_seq; 5374 tcp_rcvseqinit(tp); 5375 if (thflags & TH_ACK) { 5376 TCPSTAT_INC(tcps_connects); 5377 soisconnected(so); 5378 #ifdef MAC 5379 mac_socketpeer_set_from_mbuf(m, so); 5380 #endif 5381 /* Do window scaling on this connection? */ 5382 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 5383 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 5384 tp->rcv_scale = tp->request_r_scale; 5385 } 5386 tp->rcv_adv += min(tp->rcv_wnd, 5387 TCP_MAXWIN << tp->rcv_scale); 5388 /* 5389 * If there's data, delay ACK; if there's also a FIN ACKNOW 5390 * will be turned on later. 5391 */ 5392 if (DELAY_ACK(tp, tlen) && tlen != 0) { 5393 rack_timer_cancel(tp, (struct tcp_rack *)tp->t_fb_ptr, 5394 ((struct tcp_rack *)tp->t_fb_ptr)->r_ctl.rc_rcvtime, __LINE__); 5395 tp->t_flags |= TF_DELACK; 5396 } else { 5397 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 5398 tp->t_flags |= TF_ACKNOW; 5399 } 5400 5401 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 5402 tp->t_flags |= TF_ECN_PERMIT; 5403 TCPSTAT_INC(tcps_ecn_shs); 5404 } 5405 /* 5406 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 5407 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 5408 */ 5409 tp->t_starttime = ticks; 5410 if (tp->t_flags & TF_NEEDFIN) { 5411 tcp_state_change(tp, TCPS_FIN_WAIT_1); 5412 tp->t_flags &= ~TF_NEEDFIN; 5413 thflags &= ~TH_SYN; 5414 } else { 5415 tcp_state_change(tp, TCPS_ESTABLISHED); 5416 TCP_PROBE5(connect__established, NULL, tp, 5417 mtod(m, const char *), tp, th); 5418 cc_conn_init(tp); 5419 } 5420 } else { 5421 /* 5422 * Received initial SYN in SYN-SENT[*] state => simultaneous 5423 * open. If segment contains CC option and there is a 5424 * cached CC, apply TAO test. If it succeeds, connection is * 5425 * half-synchronized. Otherwise, do 3-way handshake: 5426 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 5427 * there was no CC option, clear cached CC value. 5428 */ 5429 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 5430 tcp_state_change(tp, TCPS_SYN_RECEIVED); 5431 } 5432 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 5433 INP_WLOCK_ASSERT(tp->t_inpcb); 5434 /* 5435 * Advance th->th_seq to correspond to first data byte. If data, 5436 * trim to stay within window, dropping FIN if necessary. 5437 */ 5438 th->th_seq++; 5439 if (tlen > tp->rcv_wnd) { 5440 todrop = tlen - tp->rcv_wnd; 5441 m_adj(m, -todrop); 5442 tlen = tp->rcv_wnd; 5443 thflags &= ~TH_FIN; 5444 TCPSTAT_INC(tcps_rcvpackafterwin); 5445 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 5446 } 5447 tp->snd_wl1 = th->th_seq - 1; 5448 tp->rcv_up = th->th_seq; 5449 /* 5450 * Client side of transaction: already sent SYN and data. If the 5451 * remote host used T/TCP to validate the SYN, our data will be 5452 * ACK'd; if so, enter normal data segment processing in the middle 5453 * of step 5, ack processing. Otherwise, goto step 6. 5454 */ 5455 if (thflags & TH_ACK) { 5456 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 5457 return (ret_val); 5458 /* We may have changed to FIN_WAIT_1 above */ 5459 if (tp->t_state == TCPS_FIN_WAIT_1) { 5460 /* 5461 * In FIN_WAIT_1 STATE in addition to the processing 5462 * for the ESTABLISHED state if our FIN is now 5463 * acknowledged then enter FIN_WAIT_2. 5464 */ 5465 if (ourfinisacked) { 5466 /* 5467 * If we can't receive any more data, then 5468 * closing user can proceed. Starting the 5469 * timer is contrary to the specification, 5470 * but if we don't get a FIN we'll hang 5471 * forever. 5472 * 5473 * XXXjl: we should release the tp also, and 5474 * use a compressed state. 5475 */ 5476 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5477 soisdisconnected(so); 5478 tcp_timer_activate(tp, TT_2MSL, 5479 (tcp_fast_finwait2_recycle ? 5480 tcp_finwait2_timeout : 5481 TP_MAXIDLE(tp))); 5482 } 5483 tcp_state_change(tp, TCPS_FIN_WAIT_2); 5484 } 5485 } 5486 } 5487 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 5488 tiwin, thflags, nxt_pkt)); 5489 } 5490 5491 /* 5492 * Return value of 1, the TCB is unlocked and most 5493 * likely gone, return value of 0, the TCP is still 5494 * locked. 5495 */ 5496 static int 5497 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 5498 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5499 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 5500 { 5501 int32_t ret_val = 0; 5502 int32_t ourfinisacked = 0; 5503 5504 rack_calc_rwin(so, tp); 5505 5506 if ((thflags & TH_ACK) && 5507 (SEQ_LEQ(th->th_ack, tp->snd_una) || 5508 SEQ_GT(th->th_ack, tp->snd_max))) { 5509 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 5510 return (1); 5511 } 5512 #ifdef TCP_RFC7413 5513 if (tp->t_flags & TF_FASTOPEN) { 5514 /* 5515 * When a TFO connection is in SYN_RECEIVED, the only valid 5516 * packets are the initial SYN, a retransmit/copy of the 5517 * initial SYN (possibly with a subset of the original 5518 * data), a valid ACK, a FIN, or a RST. 5519 */ 5520 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 5521 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 5522 return (1); 5523 } else if (thflags & TH_SYN) { 5524 /* non-initial SYN is ignored */ 5525 struct tcp_rack *rack; 5526 5527 rack = (struct tcp_rack *)tp->t_fb_ptr; 5528 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 5529 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 5530 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 5531 rack_do_drop(m, NULL); 5532 return (0); 5533 } 5534 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 5535 rack_do_drop(m, NULL); 5536 return (0); 5537 } 5538 } 5539 #endif 5540 if (thflags & TH_RST) 5541 return (rack_process_rst(m, th, so, tp)); 5542 /* 5543 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 5544 * synchronized state. 5545 */ 5546 if (thflags & TH_SYN) { 5547 rack_challenge_ack(m, th, tp, &ret_val); 5548 return (ret_val); 5549 } 5550 /* 5551 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 5552 * it's less than ts_recent, drop it. 5553 */ 5554 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 5555 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 5556 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val)) 5557 return (ret_val); 5558 } 5559 /* 5560 * In the SYN-RECEIVED state, validate that the packet belongs to 5561 * this connection before trimming the data to fit the receive 5562 * window. Check the sequence number versus IRS since we know the 5563 * sequence numbers haven't wrapped. This is a partial fix for the 5564 * "LAND" DoS attack. 5565 */ 5566 if (SEQ_LT(th->th_seq, tp->irs)) { 5567 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 5568 return (1); 5569 } 5570 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 5571 return (ret_val); 5572 } 5573 /* 5574 * If last ACK falls within this segment's sequence numbers, record 5575 * its timestamp. NOTE: 1) That the test incorporates suggestions 5576 * from the latest proposal of the tcplw@cray.com list (Braden 5577 * 1993/04/26). 2) That updating only on newer timestamps interferes 5578 * with our earlier PAWS tests, so this check should be solely 5579 * predicated on the sequence space of this segment. 3) That we 5580 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 5581 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 5582 * SEG.Len, This modified check allows us to overcome RFC1323's 5583 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 5584 * p.869. In such cases, we can still calculate the RTT correctly 5585 * when RCV.NXT == Last.ACK.Sent. 5586 */ 5587 if ((to->to_flags & TOF_TS) != 0 && 5588 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 5589 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 5590 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 5591 tp->ts_recent_age = tcp_ts_getticks(); 5592 tp->ts_recent = to->to_tsval; 5593 } 5594 /* 5595 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 5596 * is on (half-synchronized state), then queue data for later 5597 * processing; else drop segment and return. 5598 */ 5599 if ((thflags & TH_ACK) == 0) { 5600 #ifdef TCP_RFC7413 5601 if (tp->t_flags & TF_FASTOPEN) { 5602 tp->snd_wnd = tiwin; 5603 cc_conn_init(tp); 5604 } 5605 #endif 5606 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 5607 tiwin, thflags, nxt_pkt)); 5608 } 5609 TCPSTAT_INC(tcps_connects); 5610 soisconnected(so); 5611 /* Do window scaling? */ 5612 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 5613 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 5614 tp->rcv_scale = tp->request_r_scale; 5615 tp->snd_wnd = tiwin; 5616 } 5617 /* 5618 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 5619 * FIN-WAIT-1 5620 */ 5621 tp->t_starttime = ticks; 5622 if (tp->t_flags & TF_NEEDFIN) { 5623 tcp_state_change(tp, TCPS_FIN_WAIT_1); 5624 tp->t_flags &= ~TF_NEEDFIN; 5625 } else { 5626 tcp_state_change(tp, TCPS_ESTABLISHED); 5627 TCP_PROBE5(accept__established, NULL, tp, 5628 mtod(m, const char *), tp, th); 5629 #ifdef TCP_RFC7413 5630 if (tp->t_tfo_pending) { 5631 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 5632 tp->t_tfo_pending = NULL; 5633 5634 /* 5635 * Account for the ACK of our SYN prior to regular 5636 * ACK processing below. 5637 */ 5638 tp->snd_una++; 5639 } 5640 /* 5641 * TFO connections call cc_conn_init() during SYN 5642 * processing. Calling it again here for such connections 5643 * is not harmless as it would undo the snd_cwnd reduction 5644 * that occurs when a TFO SYN|ACK is retransmitted. 5645 */ 5646 if (!(tp->t_flags & TF_FASTOPEN)) 5647 #endif 5648 cc_conn_init(tp); 5649 } 5650 /* 5651 * If segment contains data or ACK, will call tcp_reass() later; if 5652 * not, do so now to pass queued data to user. 5653 */ 5654 if (tlen == 0 && (thflags & TH_FIN) == 0) 5655 (void)tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 5656 (struct mbuf *)0); 5657 tp->snd_wl1 = th->th_seq - 1; 5658 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 5659 return (ret_val); 5660 } 5661 if (tp->t_state == TCPS_FIN_WAIT_1) { 5662 /* We could have went to FIN_WAIT_1 (or EST) above */ 5663 /* 5664 * In FIN_WAIT_1 STATE in addition to the processing for the 5665 * ESTABLISHED state if our FIN is now acknowledged then 5666 * enter FIN_WAIT_2. 5667 */ 5668 if (ourfinisacked) { 5669 /* 5670 * If we can't receive any more data, then closing 5671 * user can proceed. Starting the timer is contrary 5672 * to the specification, but if we don't get a FIN 5673 * we'll hang forever. 5674 * 5675 * XXXjl: we should release the tp also, and use a 5676 * compressed state. 5677 */ 5678 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5679 soisdisconnected(so); 5680 tcp_timer_activate(tp, TT_2MSL, 5681 (tcp_fast_finwait2_recycle ? 5682 tcp_finwait2_timeout : 5683 TP_MAXIDLE(tp))); 5684 } 5685 tcp_state_change(tp, TCPS_FIN_WAIT_2); 5686 } 5687 } 5688 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 5689 tiwin, thflags, nxt_pkt)); 5690 } 5691 5692 /* 5693 * Return value of 1, the TCB is unlocked and most 5694 * likely gone, return value of 0, the TCP is still 5695 * locked. 5696 */ 5697 static int 5698 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 5699 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5700 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 5701 { 5702 int32_t ret_val = 0; 5703 5704 /* 5705 * Header prediction: check for the two common cases of a 5706 * uni-directional data xfer. If the packet has no control flags, 5707 * is in-sequence, the window didn't change and we're not 5708 * retransmitting, it's a candidate. If the length is zero and the 5709 * ack moved forward, we're the sender side of the xfer. Just free 5710 * the data acked & wake any higher level process that was blocked 5711 * waiting for space. If the length is non-zero and the ack didn't 5712 * move, we're the receiver side. If we're getting packets in-order 5713 * (the reassembly queue is empty), add the data toc The socket 5714 * buffer and note that we need a delayed ack. Make sure that the 5715 * hidden state-flags are also off. Since we check for 5716 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 5717 */ 5718 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 5719 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) && 5720 __predict_true(SEGQ_EMPTY(tp)) && 5721 __predict_true(th->th_seq == tp->rcv_nxt)) { 5722 struct tcp_rack *rack; 5723 5724 rack = (struct tcp_rack *)tp->t_fb_ptr; 5725 if (tlen == 0) { 5726 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 5727 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 5728 return (0); 5729 } 5730 } else { 5731 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 5732 tiwin, nxt_pkt)) { 5733 return (0); 5734 } 5735 } 5736 } 5737 rack_calc_rwin(so, tp); 5738 5739 if (thflags & TH_RST) 5740 return (rack_process_rst(m, th, so, tp)); 5741 5742 /* 5743 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 5744 * synchronized state. 5745 */ 5746 if (thflags & TH_SYN) { 5747 rack_challenge_ack(m, th, tp, &ret_val); 5748 return (ret_val); 5749 } 5750 /* 5751 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 5752 * it's less than ts_recent, drop it. 5753 */ 5754 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 5755 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 5756 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val)) 5757 return (ret_val); 5758 } 5759 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 5760 return (ret_val); 5761 } 5762 /* 5763 * If last ACK falls within this segment's sequence numbers, record 5764 * its timestamp. NOTE: 1) That the test incorporates suggestions 5765 * from the latest proposal of the tcplw@cray.com list (Braden 5766 * 1993/04/26). 2) That updating only on newer timestamps interferes 5767 * with our earlier PAWS tests, so this check should be solely 5768 * predicated on the sequence space of this segment. 3) That we 5769 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 5770 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 5771 * SEG.Len, This modified check allows us to overcome RFC1323's 5772 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 5773 * p.869. In such cases, we can still calculate the RTT correctly 5774 * when RCV.NXT == Last.ACK.Sent. 5775 */ 5776 if ((to->to_flags & TOF_TS) != 0 && 5777 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 5778 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 5779 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 5780 tp->ts_recent_age = tcp_ts_getticks(); 5781 tp->ts_recent = to->to_tsval; 5782 } 5783 /* 5784 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 5785 * is on (half-synchronized state), then queue data for later 5786 * processing; else drop segment and return. 5787 */ 5788 if ((thflags & TH_ACK) == 0) { 5789 if (tp->t_flags & TF_NEEDSYN) { 5790 5791 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 5792 tiwin, thflags, nxt_pkt)); 5793 5794 } else if (tp->t_flags & TF_ACKNOW) { 5795 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 5796 return (ret_val); 5797 } else { 5798 rack_do_drop(m, NULL); 5799 return (0); 5800 } 5801 } 5802 /* 5803 * Ack processing. 5804 */ 5805 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 5806 return (ret_val); 5807 } 5808 if (sbavail(&so->so_snd)) { 5809 if (rack_progress_timeout_check(tp)) { 5810 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 5811 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 5812 return (1); 5813 } 5814 } 5815 /* State changes only happen in rack_process_data() */ 5816 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 5817 tiwin, thflags, nxt_pkt)); 5818 } 5819 5820 /* 5821 * Return value of 1, the TCB is unlocked and most 5822 * likely gone, return value of 0, the TCP is still 5823 * locked. 5824 */ 5825 static int 5826 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 5827 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5828 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 5829 { 5830 int32_t ret_val = 0; 5831 5832 rack_calc_rwin(so, tp); 5833 if (thflags & TH_RST) 5834 return (rack_process_rst(m, th, so, tp)); 5835 /* 5836 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 5837 * synchronized state. 5838 */ 5839 if (thflags & TH_SYN) { 5840 rack_challenge_ack(m, th, tp, &ret_val); 5841 return (ret_val); 5842 } 5843 /* 5844 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 5845 * it's less than ts_recent, drop it. 5846 */ 5847 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 5848 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 5849 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val)) 5850 return (ret_val); 5851 } 5852 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 5853 return (ret_val); 5854 } 5855 /* 5856 * If last ACK falls within this segment's sequence numbers, record 5857 * its timestamp. NOTE: 1) That the test incorporates suggestions 5858 * from the latest proposal of the tcplw@cray.com list (Braden 5859 * 1993/04/26). 2) That updating only on newer timestamps interferes 5860 * with our earlier PAWS tests, so this check should be solely 5861 * predicated on the sequence space of this segment. 3) That we 5862 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 5863 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 5864 * SEG.Len, This modified check allows us to overcome RFC1323's 5865 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 5866 * p.869. In such cases, we can still calculate the RTT correctly 5867 * when RCV.NXT == Last.ACK.Sent. 5868 */ 5869 if ((to->to_flags & TOF_TS) != 0 && 5870 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 5871 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 5872 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 5873 tp->ts_recent_age = tcp_ts_getticks(); 5874 tp->ts_recent = to->to_tsval; 5875 } 5876 /* 5877 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 5878 * is on (half-synchronized state), then queue data for later 5879 * processing; else drop segment and return. 5880 */ 5881 if ((thflags & TH_ACK) == 0) { 5882 if (tp->t_flags & TF_NEEDSYN) { 5883 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 5884 tiwin, thflags, nxt_pkt)); 5885 5886 } else if (tp->t_flags & TF_ACKNOW) { 5887 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 5888 return (ret_val); 5889 } else { 5890 rack_do_drop(m, NULL); 5891 return (0); 5892 } 5893 } 5894 /* 5895 * Ack processing. 5896 */ 5897 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 5898 return (ret_val); 5899 } 5900 if (sbavail(&so->so_snd)) { 5901 if (rack_progress_timeout_check(tp)) { 5902 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 5903 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 5904 return (1); 5905 } 5906 } 5907 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 5908 tiwin, thflags, nxt_pkt)); 5909 } 5910 5911 static int 5912 rack_check_data_after_close(struct mbuf *m, 5913 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 5914 { 5915 struct tcp_rack *rack; 5916 5917 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 5918 rack = (struct tcp_rack *)tp->t_fb_ptr; 5919 if (rack->rc_allow_data_af_clo == 0) { 5920 close_now: 5921 tp = tcp_close(tp); 5922 TCPSTAT_INC(tcps_rcvafterclose); 5923 rack_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 5924 return (1); 5925 } 5926 if (sbavail(&so->so_snd) == 0) 5927 goto close_now; 5928 /* Ok we allow data that is ignored and a followup reset */ 5929 tp->rcv_nxt = th->th_seq + *tlen; 5930 tp->t_flags2 |= TF2_DROP_AF_DATA; 5931 rack->r_wanted_output = 1; 5932 *tlen = 0; 5933 return (0); 5934 } 5935 5936 /* 5937 * Return value of 1, the TCB is unlocked and most 5938 * likely gone, return value of 0, the TCP is still 5939 * locked. 5940 */ 5941 static int 5942 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 5943 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5944 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 5945 { 5946 int32_t ret_val = 0; 5947 int32_t ourfinisacked = 0; 5948 5949 rack_calc_rwin(so, tp); 5950 5951 if (thflags & TH_RST) 5952 return (rack_process_rst(m, th, so, tp)); 5953 /* 5954 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 5955 * synchronized state. 5956 */ 5957 if (thflags & TH_SYN) { 5958 rack_challenge_ack(m, th, tp, &ret_val); 5959 return (ret_val); 5960 } 5961 /* 5962 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 5963 * it's less than ts_recent, drop it. 5964 */ 5965 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 5966 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 5967 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val)) 5968 return (ret_val); 5969 } 5970 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 5971 return (ret_val); 5972 } 5973 /* 5974 * If new data are received on a connection after the user processes 5975 * are gone, then RST the other end. 5976 */ 5977 if ((so->so_state & SS_NOFDREF) && tlen) { 5978 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 5979 return (1); 5980 } 5981 /* 5982 * If last ACK falls within this segment's sequence numbers, record 5983 * its timestamp. NOTE: 1) That the test incorporates suggestions 5984 * from the latest proposal of the tcplw@cray.com list (Braden 5985 * 1993/04/26). 2) That updating only on newer timestamps interferes 5986 * with our earlier PAWS tests, so this check should be solely 5987 * predicated on the sequence space of this segment. 3) That we 5988 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 5989 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 5990 * SEG.Len, This modified check allows us to overcome RFC1323's 5991 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 5992 * p.869. In such cases, we can still calculate the RTT correctly 5993 * when RCV.NXT == Last.ACK.Sent. 5994 */ 5995 if ((to->to_flags & TOF_TS) != 0 && 5996 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 5997 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 5998 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 5999 tp->ts_recent_age = tcp_ts_getticks(); 6000 tp->ts_recent = to->to_tsval; 6001 } 6002 /* 6003 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6004 * is on (half-synchronized state), then queue data for later 6005 * processing; else drop segment and return. 6006 */ 6007 if ((thflags & TH_ACK) == 0) { 6008 if (tp->t_flags & TF_NEEDSYN) { 6009 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6010 tiwin, thflags, nxt_pkt)); 6011 } else if (tp->t_flags & TF_ACKNOW) { 6012 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6013 return (ret_val); 6014 } else { 6015 rack_do_drop(m, NULL); 6016 return (0); 6017 } 6018 } 6019 /* 6020 * Ack processing. 6021 */ 6022 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6023 return (ret_val); 6024 } 6025 if (ourfinisacked) { 6026 /* 6027 * If we can't receive any more data, then closing user can 6028 * proceed. Starting the timer is contrary to the 6029 * specification, but if we don't get a FIN we'll hang 6030 * forever. 6031 * 6032 * XXXjl: we should release the tp also, and use a 6033 * compressed state. 6034 */ 6035 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6036 soisdisconnected(so); 6037 tcp_timer_activate(tp, TT_2MSL, 6038 (tcp_fast_finwait2_recycle ? 6039 tcp_finwait2_timeout : 6040 TP_MAXIDLE(tp))); 6041 } 6042 tcp_state_change(tp, TCPS_FIN_WAIT_2); 6043 } 6044 if (sbavail(&so->so_snd)) { 6045 if (rack_progress_timeout_check(tp)) { 6046 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6047 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6048 return (1); 6049 } 6050 } 6051 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6052 tiwin, thflags, nxt_pkt)); 6053 } 6054 6055 /* 6056 * Return value of 1, the TCB is unlocked and most 6057 * likely gone, return value of 0, the TCP is still 6058 * locked. 6059 */ 6060 static int 6061 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 6062 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6063 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6064 { 6065 int32_t ret_val = 0; 6066 int32_t ourfinisacked = 0; 6067 6068 rack_calc_rwin(so, tp); 6069 6070 if (thflags & TH_RST) 6071 return (rack_process_rst(m, th, so, tp)); 6072 /* 6073 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6074 * synchronized state. 6075 */ 6076 if (thflags & TH_SYN) { 6077 rack_challenge_ack(m, th, tp, &ret_val); 6078 return (ret_val); 6079 } 6080 /* 6081 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6082 * it's less than ts_recent, drop it. 6083 */ 6084 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6085 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6086 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6087 return (ret_val); 6088 } 6089 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6090 return (ret_val); 6091 } 6092 /* 6093 * If new data are received on a connection after the user processes 6094 * are gone, then RST the other end. 6095 */ 6096 if ((so->so_state & SS_NOFDREF) && tlen) { 6097 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 6098 return (1); 6099 } 6100 /* 6101 * If last ACK falls within this segment's sequence numbers, record 6102 * its timestamp. NOTE: 1) That the test incorporates suggestions 6103 * from the latest proposal of the tcplw@cray.com list (Braden 6104 * 1993/04/26). 2) That updating only on newer timestamps interferes 6105 * with our earlier PAWS tests, so this check should be solely 6106 * predicated on the sequence space of this segment. 3) That we 6107 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6108 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6109 * SEG.Len, This modified check allows us to overcome RFC1323's 6110 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6111 * p.869. In such cases, we can still calculate the RTT correctly 6112 * when RCV.NXT == Last.ACK.Sent. 6113 */ 6114 if ((to->to_flags & TOF_TS) != 0 && 6115 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6116 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6117 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6118 tp->ts_recent_age = tcp_ts_getticks(); 6119 tp->ts_recent = to->to_tsval; 6120 } 6121 /* 6122 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6123 * is on (half-synchronized state), then queue data for later 6124 * processing; else drop segment and return. 6125 */ 6126 if ((thflags & TH_ACK) == 0) { 6127 if (tp->t_flags & TF_NEEDSYN) { 6128 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6129 tiwin, thflags, nxt_pkt)); 6130 } else if (tp->t_flags & TF_ACKNOW) { 6131 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6132 return (ret_val); 6133 } else { 6134 rack_do_drop(m, NULL); 6135 return (0); 6136 } 6137 } 6138 /* 6139 * Ack processing. 6140 */ 6141 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6142 return (ret_val); 6143 } 6144 if (ourfinisacked) { 6145 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 6146 tcp_twstart(tp); 6147 m_freem(m); 6148 return (1); 6149 } 6150 if (sbavail(&so->so_snd)) { 6151 if (rack_progress_timeout_check(tp)) { 6152 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6153 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6154 return (1); 6155 } 6156 } 6157 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6158 tiwin, thflags, nxt_pkt)); 6159 } 6160 6161 /* 6162 * Return value of 1, the TCB is unlocked and most 6163 * likely gone, return value of 0, the TCP is still 6164 * locked. 6165 */ 6166 static int 6167 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 6168 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6169 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6170 { 6171 int32_t ret_val = 0; 6172 int32_t ourfinisacked = 0; 6173 6174 rack_calc_rwin(so, tp); 6175 6176 if (thflags & TH_RST) 6177 return (rack_process_rst(m, th, so, tp)); 6178 /* 6179 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6180 * synchronized state. 6181 */ 6182 if (thflags & TH_SYN) { 6183 rack_challenge_ack(m, th, tp, &ret_val); 6184 return (ret_val); 6185 } 6186 /* 6187 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6188 * it's less than ts_recent, drop it. 6189 */ 6190 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6191 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6192 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6193 return (ret_val); 6194 } 6195 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6196 return (ret_val); 6197 } 6198 /* 6199 * If new data are received on a connection after the user processes 6200 * are gone, then RST the other end. 6201 */ 6202 if ((so->so_state & SS_NOFDREF) && tlen) { 6203 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 6204 return (1); 6205 } 6206 /* 6207 * If last ACK falls within this segment's sequence numbers, record 6208 * its timestamp. NOTE: 1) That the test incorporates suggestions 6209 * from the latest proposal of the tcplw@cray.com list (Braden 6210 * 1993/04/26). 2) That updating only on newer timestamps interferes 6211 * with our earlier PAWS tests, so this check should be solely 6212 * predicated on the sequence space of this segment. 3) That we 6213 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6214 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6215 * SEG.Len, This modified check allows us to overcome RFC1323's 6216 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6217 * p.869. In such cases, we can still calculate the RTT correctly 6218 * when RCV.NXT == Last.ACK.Sent. 6219 */ 6220 if ((to->to_flags & TOF_TS) != 0 && 6221 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6222 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6223 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6224 tp->ts_recent_age = tcp_ts_getticks(); 6225 tp->ts_recent = to->to_tsval; 6226 } 6227 /* 6228 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6229 * is on (half-synchronized state), then queue data for later 6230 * processing; else drop segment and return. 6231 */ 6232 if ((thflags & TH_ACK) == 0) { 6233 if (tp->t_flags & TF_NEEDSYN) { 6234 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6235 tiwin, thflags, nxt_pkt)); 6236 } else if (tp->t_flags & TF_ACKNOW) { 6237 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6238 return (ret_val); 6239 } else { 6240 rack_do_drop(m, NULL); 6241 return (0); 6242 } 6243 } 6244 /* 6245 * case TCPS_LAST_ACK: Ack processing. 6246 */ 6247 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6248 return (ret_val); 6249 } 6250 if (ourfinisacked) { 6251 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 6252 tp = tcp_close(tp); 6253 rack_do_drop(m, tp); 6254 return (1); 6255 } 6256 if (sbavail(&so->so_snd)) { 6257 if (rack_progress_timeout_check(tp)) { 6258 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6259 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6260 return (1); 6261 } 6262 } 6263 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6264 tiwin, thflags, nxt_pkt)); 6265 } 6266 6267 6268 /* 6269 * Return value of 1, the TCB is unlocked and most 6270 * likely gone, return value of 0, the TCP is still 6271 * locked. 6272 */ 6273 static int 6274 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 6275 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6276 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6277 { 6278 int32_t ret_val = 0; 6279 int32_t ourfinisacked = 0; 6280 6281 rack_calc_rwin(so, tp); 6282 6283 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 6284 if (thflags & TH_RST) 6285 return (rack_process_rst(m, th, so, tp)); 6286 /* 6287 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6288 * synchronized state. 6289 */ 6290 if (thflags & TH_SYN) { 6291 rack_challenge_ack(m, th, tp, &ret_val); 6292 return (ret_val); 6293 } 6294 /* 6295 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6296 * it's less than ts_recent, drop it. 6297 */ 6298 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6299 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6300 if (rack_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6301 return (ret_val); 6302 } 6303 if (rack_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6304 return (ret_val); 6305 } 6306 /* 6307 * If new data are received on a connection after the user processes 6308 * are gone, then RST the other end. 6309 */ 6310 if ((so->so_state & SS_NOFDREF) && 6311 tlen) { 6312 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 6313 return (1); 6314 } 6315 /* 6316 * If last ACK falls within this segment's sequence numbers, record 6317 * its timestamp. NOTE: 1) That the test incorporates suggestions 6318 * from the latest proposal of the tcplw@cray.com list (Braden 6319 * 1993/04/26). 2) That updating only on newer timestamps interferes 6320 * with our earlier PAWS tests, so this check should be solely 6321 * predicated on the sequence space of this segment. 3) That we 6322 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6323 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6324 * SEG.Len, This modified check allows us to overcome RFC1323's 6325 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6326 * p.869. In such cases, we can still calculate the RTT correctly 6327 * when RCV.NXT == Last.ACK.Sent. 6328 */ 6329 if ((to->to_flags & TOF_TS) != 0 && 6330 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6331 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6332 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6333 tp->ts_recent_age = tcp_ts_getticks(); 6334 tp->ts_recent = to->to_tsval; 6335 } 6336 /* 6337 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6338 * is on (half-synchronized state), then queue data for later 6339 * processing; else drop segment and return. 6340 */ 6341 if ((thflags & TH_ACK) == 0) { 6342 if (tp->t_flags & TF_NEEDSYN) { 6343 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6344 tiwin, thflags, nxt_pkt)); 6345 } else if (tp->t_flags & TF_ACKNOW) { 6346 rack_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6347 return (ret_val); 6348 } else { 6349 rack_do_drop(m, NULL); 6350 return (0); 6351 } 6352 } 6353 /* 6354 * Ack processing. 6355 */ 6356 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6357 return (ret_val); 6358 } 6359 if (sbavail(&so->so_snd)) { 6360 if (rack_progress_timeout_check(tp)) { 6361 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6362 rack_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6363 return (1); 6364 } 6365 } 6366 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6367 tiwin, thflags, nxt_pkt)); 6368 } 6369 6370 6371 static void inline 6372 rack_clear_rate_sample(struct tcp_rack *rack) 6373 { 6374 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 6375 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 6376 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 6377 } 6378 6379 static int 6380 rack_init(struct tcpcb *tp) 6381 { 6382 struct tcp_rack *rack = NULL; 6383 6384 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 6385 if (tp->t_fb_ptr == NULL) { 6386 /* 6387 * We need to allocate memory but cant. The INP and INP_INFO 6388 * locks and they are recusive (happens during setup. So a 6389 * scheme to drop the locks fails :( 6390 * 6391 */ 6392 return (ENOMEM); 6393 } 6394 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 6395 6396 rack = (struct tcp_rack *)tp->t_fb_ptr; 6397 TAILQ_INIT(&rack->r_ctl.rc_map); 6398 TAILQ_INIT(&rack->r_ctl.rc_free); 6399 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6400 rack->rc_tp = tp; 6401 if (tp->t_inpcb) { 6402 rack->rc_inp = tp->t_inpcb; 6403 } 6404 /* Probably not needed but lets be sure */ 6405 rack_clear_rate_sample(rack); 6406 rack->r_cpu = 0; 6407 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 6408 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 6409 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 6410 rack->rc_pace_reduce = rack_slot_reduction; 6411 if (V_tcp_delack_enabled) 6412 tp->t_delayed_ack = 1; 6413 else 6414 tp->t_delayed_ack = 0; 6415 rack->rc_pace_max_segs = rack_hptsi_segments; 6416 rack->r_ctl.rc_early_recovery_segs = rack_early_recovery_max_seg; 6417 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 6418 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 6419 rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce; 6420 rack->r_idle_reduce_largest = rack_reduce_largest_on_idle; 6421 rack->r_enforce_min_pace = rack_min_pace_time; 6422 rack->r_min_pace_seg_thresh = rack_min_pace_time_seg_req; 6423 rack->r_ctl.rc_prop_rate = rack_proportional_rate; 6424 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 6425 rack->r_ctl.rc_early_recovery = rack_early_recovery; 6426 rack->rc_always_pace = rack_pace_every_seg; 6427 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 6428 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 6429 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 6430 rack->r_ctl.rc_min_to = rack_min_to; 6431 rack->r_ctl.rc_prr_inc_var = rack_inc_var; 6432 if (tp->snd_una != tp->snd_max) { 6433 /* Create a send map for the current outstanding data */ 6434 struct rack_sendmap *rsm; 6435 6436 rsm = rack_alloc(rack); 6437 if (rsm == NULL) { 6438 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 6439 tp->t_fb_ptr = NULL; 6440 return (ENOMEM); 6441 } 6442 rsm->r_flags = RACK_OVERMAX; 6443 rsm->r_tim_lastsent[0] = tcp_ts_getticks(); 6444 rsm->r_rtr_cnt = 1; 6445 rsm->r_rtr_bytes = 0; 6446 rsm->r_start = tp->snd_una; 6447 rsm->r_end = tp->snd_max; 6448 rsm->r_sndcnt = 0; 6449 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_map, rsm, r_next); 6450 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6451 rsm->r_in_tmap = 1; 6452 } 6453 rack_stop_all_timers(tp); 6454 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0); 6455 return (0); 6456 } 6457 6458 static int 6459 rack_handoff_ok(struct tcpcb *tp) 6460 { 6461 if ((tp->t_state == TCPS_CLOSED) || 6462 (tp->t_state == TCPS_LISTEN)) { 6463 /* Sure no problem though it may not stick */ 6464 return (0); 6465 } 6466 if ((tp->t_state == TCPS_SYN_SENT) || 6467 (tp->t_state == TCPS_SYN_RECEIVED)) { 6468 /* 6469 * We really don't know you have to get to ESTAB or beyond 6470 * to tell. 6471 */ 6472 return (EAGAIN); 6473 } 6474 if (tp->t_flags & TF_SACK_PERMIT) { 6475 return (0); 6476 } 6477 /* 6478 * If we reach here we don't do SACK on this connection so we can 6479 * never do rack. 6480 */ 6481 return (EINVAL); 6482 } 6483 6484 static void 6485 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 6486 { 6487 if (tp->t_fb_ptr) { 6488 struct tcp_rack *rack; 6489 struct rack_sendmap *rsm; 6490 6491 rack = (struct tcp_rack *)tp->t_fb_ptr; 6492 #ifdef TCP_BLACKBOX 6493 tcp_log_flowend(tp); 6494 #endif 6495 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map); 6496 while (rsm) { 6497 TAILQ_REMOVE(&rack->r_ctl.rc_map, rsm, r_next); 6498 uma_zfree(rack_zone, rsm); 6499 rsm = TAILQ_FIRST(&rack->r_ctl.rc_map); 6500 } 6501 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 6502 while (rsm) { 6503 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_next); 6504 uma_zfree(rack_zone, rsm); 6505 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 6506 } 6507 rack->rc_free_cnt = 0; 6508 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 6509 tp->t_fb_ptr = NULL; 6510 } 6511 /* Make sure snd_nxt is correctly set */ 6512 tp->snd_nxt = tp->snd_max; 6513 } 6514 6515 static void 6516 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 6517 { 6518 switch (tp->t_state) { 6519 case TCPS_SYN_SENT: 6520 rack->r_state = TCPS_SYN_SENT; 6521 rack->r_substate = rack_do_syn_sent; 6522 break; 6523 case TCPS_SYN_RECEIVED: 6524 rack->r_state = TCPS_SYN_RECEIVED; 6525 rack->r_substate = rack_do_syn_recv; 6526 break; 6527 case TCPS_ESTABLISHED: 6528 rack->r_state = TCPS_ESTABLISHED; 6529 rack->r_substate = rack_do_established; 6530 break; 6531 case TCPS_CLOSE_WAIT: 6532 rack->r_state = TCPS_CLOSE_WAIT; 6533 rack->r_substate = rack_do_close_wait; 6534 break; 6535 case TCPS_FIN_WAIT_1: 6536 rack->r_state = TCPS_FIN_WAIT_1; 6537 rack->r_substate = rack_do_fin_wait_1; 6538 break; 6539 case TCPS_CLOSING: 6540 rack->r_state = TCPS_CLOSING; 6541 rack->r_substate = rack_do_closing; 6542 break; 6543 case TCPS_LAST_ACK: 6544 rack->r_state = TCPS_LAST_ACK; 6545 rack->r_substate = rack_do_lastack; 6546 break; 6547 case TCPS_FIN_WAIT_2: 6548 rack->r_state = TCPS_FIN_WAIT_2; 6549 rack->r_substate = rack_do_fin_wait_2; 6550 break; 6551 case TCPS_LISTEN: 6552 case TCPS_CLOSED: 6553 case TCPS_TIME_WAIT: 6554 default: 6555 break; 6556 }; 6557 } 6558 6559 6560 static void 6561 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 6562 { 6563 /* 6564 * We received an ack, and then did not 6565 * call send or were bounced out due to the 6566 * hpts was running. Now a timer is up as well, is 6567 * it the right timer? 6568 */ 6569 struct rack_sendmap *rsm; 6570 int tmr_up; 6571 6572 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 6573 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 6574 return; 6575 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6576 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 6577 (tmr_up == PACE_TMR_RXT)) { 6578 /* Should be an RXT */ 6579 return; 6580 } 6581 if (rsm == NULL) { 6582 /* Nothing outstanding? */ 6583 if (tp->t_flags & TF_DELACK) { 6584 if (tmr_up == PACE_TMR_DELACK) 6585 /* We are supposed to have delayed ack up and we do */ 6586 return; 6587 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 6588 /* 6589 * if we hit enobufs then we would expect the possiblity 6590 * of nothing outstanding and the RXT up (and the hptsi timer). 6591 */ 6592 return; 6593 } else if (((tcp_always_keepalive || 6594 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 6595 (tp->t_state <= TCPS_CLOSING)) && 6596 (tmr_up == PACE_TMR_KEEP) && 6597 (tp->snd_max == tp->snd_una)) { 6598 /* We should have keep alive up and we do */ 6599 return; 6600 } 6601 } 6602 if (rsm && (rsm->r_flags & RACK_SACK_PASSED)) { 6603 if ((tp->t_flags & TF_SENTFIN) && 6604 ((tp->snd_max - tp->snd_una) == 1) && 6605 (rsm->r_flags & RACK_HAS_FIN)) { 6606 /* needs to be a RXT */ 6607 if (tmr_up == PACE_TMR_RXT) 6608 return; 6609 } else if (tmr_up == PACE_TMR_RACK) 6610 return; 6611 } else if (SEQ_GT(tp->snd_max,tp->snd_una) && 6612 ((tmr_up == PACE_TMR_TLP) || 6613 (tmr_up == PACE_TMR_RXT))) { 6614 /* 6615 * Either a TLP or RXT is fine if no sack-passed 6616 * is in place and data is outstanding. 6617 */ 6618 return; 6619 } else if (tmr_up == PACE_TMR_DELACK) { 6620 /* 6621 * If the delayed ack was going to go off 6622 * before the rtx/tlp/rack timer were going to 6623 * expire, then that would be the timer in control. 6624 * Note we don't check the time here trusting the 6625 * code is correct. 6626 */ 6627 return; 6628 } 6629 /* 6630 * Ok the timer originally started is not what we want now. 6631 * We will force the hpts to be stopped if any, and restart 6632 * with the slot set to what was in the saved slot. 6633 */ 6634 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 6635 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0); 6636 } 6637 6638 static void 6639 rack_hpts_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 6640 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 6641 int32_t nxt_pkt, struct timeval *tv) 6642 { 6643 int32_t thflags, retval, did_out = 0; 6644 int32_t way_out = 0; 6645 uint32_t cts; 6646 uint32_t tiwin; 6647 struct tcpopt to; 6648 struct tcp_rack *rack; 6649 struct rack_sendmap *rsm; 6650 int32_t prev_state = 0; 6651 6652 cts = tcp_tv_to_mssectick(tv); 6653 rack = (struct tcp_rack *)tp->t_fb_ptr; 6654 6655 kern_prefetch(rack, &prev_state); 6656 prev_state = 0; 6657 thflags = th->th_flags; 6658 /* 6659 * If this is either a state-changing packet or current state isn't 6660 * established, we require a read lock on tcbinfo. Otherwise, we 6661 * allow the tcbinfo to be in either locked or unlocked, as the 6662 * caller may have unnecessarily acquired a lock due to a race. 6663 */ 6664 INP_WLOCK_ASSERT(tp->t_inpcb); 6665 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 6666 __func__)); 6667 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 6668 __func__)); 6669 { 6670 union tcp_log_stackspecific log; 6671 6672 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 6673 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 6674 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 6675 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 6676 TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 6677 tlen, &log, true); 6678 } 6679 /* 6680 * Segment received on connection. Reset idle time and keep-alive 6681 * timer. XXX: This should be done after segment validation to 6682 * ignore broken/spoofed segs. 6683 */ 6684 if (tp->t_idle_reduce && (tp->snd_max == tp->snd_una)) { 6685 if ((ticks - tp->t_rcvtime) >= tp->t_rxtcur) { 6686 counter_u64_add(rack_input_idle_reduces, 1); 6687 rack_cc_after_idle(tp, 6688 (rack->r_idle_reduce_largest ? 1 :0)); 6689 } 6690 } 6691 rack->r_ctl.rc_rcvtime = cts; 6692 tp->t_rcvtime = ticks; 6693 6694 /* 6695 * Unscale the window into a 32-bit value. For the SYN_SENT state 6696 * the scale is zero. 6697 */ 6698 tiwin = th->th_win << tp->snd_scale; 6699 #ifdef NETFLIX_STATS 6700 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 6701 #endif 6702 /* 6703 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 6704 * this to occur after we've validated the segment. 6705 */ 6706 if (tp->t_flags & TF_ECN_PERMIT) { 6707 if (thflags & TH_CWR) 6708 tp->t_flags &= ~TF_ECN_SND_ECE; 6709 switch (iptos & IPTOS_ECN_MASK) { 6710 case IPTOS_ECN_CE: 6711 tp->t_flags |= TF_ECN_SND_ECE; 6712 TCPSTAT_INC(tcps_ecn_ce); 6713 break; 6714 case IPTOS_ECN_ECT0: 6715 TCPSTAT_INC(tcps_ecn_ect0); 6716 break; 6717 case IPTOS_ECN_ECT1: 6718 TCPSTAT_INC(tcps_ecn_ect1); 6719 break; 6720 } 6721 /* Congestion experienced. */ 6722 if (thflags & TH_ECE) { 6723 rack_cong_signal(tp, th, CC_ECN); 6724 } 6725 } 6726 /* 6727 * Parse options on any incoming segment. 6728 */ 6729 tcp_dooptions(&to, (u_char *)(th + 1), 6730 (th->th_off << 2) - sizeof(struct tcphdr), 6731 (thflags & TH_SYN) ? TO_SYN : 0); 6732 6733 /* 6734 * If echoed timestamp is later than the current time, fall back to 6735 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 6736 * were used when this connection was established. 6737 */ 6738 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 6739 to.to_tsecr -= tp->ts_offset; 6740 if (TSTMP_GT(to.to_tsecr, cts)) 6741 to.to_tsecr = 0; 6742 } 6743 /* 6744 * If its the first time in we need to take care of options and 6745 * verify we can do SACK for rack! 6746 */ 6747 if (rack->r_state == 0) { 6748 /* Should be init'd by rack_init() */ 6749 KASSERT(rack->rc_inp != NULL, 6750 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 6751 if (rack->rc_inp == NULL) { 6752 rack->rc_inp = tp->t_inpcb; 6753 } 6754 6755 /* 6756 * Process options only when we get SYN/ACK back. The SYN 6757 * case for incoming connections is handled in tcp_syncache. 6758 * According to RFC1323 the window field in a SYN (i.e., a 6759 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 6760 * this is traditional behavior, may need to be cleaned up. 6761 */ 6762 rack->r_cpu = inp_to_cpuid(tp->t_inpcb); 6763 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 6764 if ((to.to_flags & TOF_SCALE) && 6765 (tp->t_flags & TF_REQ_SCALE)) { 6766 tp->t_flags |= TF_RCVD_SCALE; 6767 tp->snd_scale = to.to_wscale; 6768 } 6769 /* 6770 * Initial send window. It will be updated with the 6771 * next incoming segment to the scaled value. 6772 */ 6773 tp->snd_wnd = th->th_win; 6774 if (to.to_flags & TOF_TS) { 6775 tp->t_flags |= TF_RCVD_TSTMP; 6776 tp->ts_recent = to.to_tsval; 6777 tp->ts_recent_age = cts; 6778 } 6779 if (to.to_flags & TOF_MSS) 6780 tcp_mss(tp, to.to_mss); 6781 if ((tp->t_flags & TF_SACK_PERMIT) && 6782 (to.to_flags & TOF_SACKPERM) == 0) 6783 tp->t_flags &= ~TF_SACK_PERMIT; 6784 } 6785 /* 6786 * At this point we are at the initial call. Here we decide 6787 * if we are doing RACK or not. We do this by seeing if 6788 * TF_SACK_PERMIT is set, if not rack is *not* possible and 6789 * we switch to the default code. 6790 */ 6791 if ((tp->t_flags & TF_SACK_PERMIT) == 0) { 6792 tcp_switch_back_to_default(tp); 6793 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 6794 tlen, iptos); 6795 return; 6796 } 6797 /* Set the flag */ 6798 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 6799 tcp_set_hpts(tp->t_inpcb); 6800 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 6801 } 6802 /* 6803 * This is the one exception case where we set the rack state 6804 * always. All other times (timers etc) we must have a rack-state 6805 * set (so we assure we have done the checks above for SACK). 6806 */ 6807 if (rack->r_state != tp->t_state) 6808 rack_set_state(tp, rack); 6809 if (SEQ_GT(th->th_ack, tp->snd_una) && (rsm = TAILQ_FIRST(&rack->r_ctl.rc_map)) != NULL) 6810 kern_prefetch(rsm, &prev_state); 6811 prev_state = rack->r_state; 6812 rack->r_ctl.rc_tlp_send_cnt = 0; 6813 rack_clear_rate_sample(rack); 6814 retval = (*rack->r_substate) (m, th, so, 6815 tp, &to, drop_hdrlen, 6816 tlen, tiwin, thflags, nxt_pkt); 6817 #ifdef INVARIANTS 6818 if ((retval == 0) && 6819 (tp->t_inpcb == NULL)) { 6820 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 6821 retval, tp, prev_state); 6822 } 6823 #endif 6824 if (retval == 0) { 6825 /* 6826 * If retval is 1 the tcb is unlocked and most likely the tp 6827 * is gone. 6828 */ 6829 INP_WLOCK_ASSERT(tp->t_inpcb); 6830 tcp_rack_xmit_timer_commit(rack, tp); 6831 if (nxt_pkt == 0) { 6832 if (rack->r_wanted_output != 0) { 6833 did_out = 1; 6834 (void)tp->t_fb->tfb_tcp_output(tp); 6835 } 6836 rack_start_hpts_timer(rack, tp, cts, __LINE__, 0, 0, 0); 6837 } 6838 if (((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 6839 (SEQ_GT(tp->snd_max, tp->snd_una) || 6840 (tp->t_flags & TF_DELACK) || 6841 ((tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 6842 (tp->t_state <= TCPS_CLOSING)))) { 6843 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 6844 if ((tp->snd_max == tp->snd_una) && 6845 ((tp->t_flags & TF_DELACK) == 0) && 6846 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 6847 /* keep alive not needed if we are hptsi output yet */ 6848 ; 6849 } else { 6850 if (rack->rc_inp->inp_in_hpts) 6851 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 6852 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), __LINE__, 0, 0, 0); 6853 } 6854 way_out = 1; 6855 } else { 6856 /* Do we have the correct timer running? */ 6857 rack_timer_audit(tp, rack, &so->so_snd); 6858 way_out = 2; 6859 } 6860 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out); 6861 if (did_out) 6862 rack->r_wanted_output = 0; 6863 #ifdef INVARIANTS 6864 if (tp->t_inpcb == NULL) { 6865 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 6866 did_out, 6867 retval, tp, prev_state); 6868 } 6869 #endif 6870 INP_WUNLOCK(tp->t_inpcb); 6871 } 6872 } 6873 6874 void 6875 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 6876 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 6877 { 6878 struct timeval tv; 6879 #ifdef RSS 6880 struct tcp_function_block *tfb; 6881 struct tcp_rack *rack; 6882 struct inpcb *inp; 6883 6884 rack = (struct tcp_rack *)tp->t_fb_ptr; 6885 if (rack->r_state == 0) { 6886 /* 6887 * Initial input (ACK to SYN-ACK etc)lets go ahead and get 6888 * it processed 6889 */ 6890 tcp_get_usecs(&tv); 6891 rack_hpts_do_segment(m, th, so, tp, drop_hdrlen, 6892 tlen, iptos, 0, &tv); 6893 return; 6894 } 6895 tcp_queue_to_input(tp, m, th, tlen, drop_hdrlen, iptos); 6896 INP_WUNLOCK(tp->t_inpcb); 6897 #else 6898 tcp_get_usecs(&tv); 6899 rack_hpts_do_segment(m, th, so, tp, drop_hdrlen, 6900 tlen, iptos, 0, &tv); 6901 #endif 6902 } 6903 6904 struct rack_sendmap * 6905 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 6906 { 6907 struct rack_sendmap *rsm = NULL; 6908 int32_t idx; 6909 uint32_t srtt_cur, srtt = 0, thresh = 0, ts_low = 0; 6910 6911 /* Return the next guy to be re-transmitted */ 6912 if (TAILQ_EMPTY(&rack->r_ctl.rc_map)) { 6913 return (NULL); 6914 } 6915 if (tp->t_flags & TF_SENTFIN) { 6916 /* retran the end FIN? */ 6917 return (NULL); 6918 } 6919 /* ok lets look at this one */ 6920 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6921 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 6922 goto check_it; 6923 } 6924 rsm = rack_find_lowest_rsm(rack); 6925 if (rsm == NULL) { 6926 return (NULL); 6927 } 6928 check_it: 6929 srtt_cur = tp->t_srtt >> TCP_RTT_SHIFT; 6930 srtt = TICKS_2_MSEC(srtt_cur); 6931 if (rack->rc_rack_rtt && (srtt > rack->rc_rack_rtt)) 6932 srtt = rack->rc_rack_rtt; 6933 if (rsm->r_flags & RACK_ACKED) { 6934 return (NULL); 6935 } 6936 if ((rsm->r_flags & RACK_SACK_PASSED) == 0) { 6937 /* Its not yet ready */ 6938 return (NULL); 6939 } 6940 idx = rsm->r_rtr_cnt - 1; 6941 ts_low = rsm->r_tim_lastsent[idx]; 6942 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 6943 if (tsused <= ts_low) { 6944 return (NULL); 6945 } 6946 if ((tsused - ts_low) >= thresh) { 6947 return (rsm); 6948 } 6949 return (NULL); 6950 } 6951 6952 static int 6953 rack_output(struct tcpcb *tp) 6954 { 6955 struct socket *so; 6956 uint32_t recwin, sendwin; 6957 uint32_t sb_offset; 6958 int32_t len, flags, error = 0; 6959 struct mbuf *m; 6960 struct mbuf *mb; 6961 uint32_t if_hw_tsomaxsegcount = 0; 6962 uint32_t if_hw_tsomaxsegsize; 6963 long tot_len_this_send = 0; 6964 struct ip *ip = NULL; 6965 #ifdef TCPDEBUG 6966 struct ipovly *ipov = NULL; 6967 #endif 6968 #ifdef NETFLIX_TCP_O_UDP 6969 struct udphdr *udp = NULL; 6970 #endif 6971 struct tcp_rack *rack; 6972 struct tcphdr *th; 6973 uint8_t pass = 0; 6974 u_char opt[TCP_MAXOLEN]; 6975 unsigned ipoptlen, optlen, hdrlen; 6976 #ifdef NETFLIX_TCP_O_UDP 6977 unsigned ulen; 6978 #endif 6979 uint32_t rack_seq; 6980 6981 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 6982 unsigned ipsec_optlen = 0; 6983 6984 #endif 6985 int32_t idle, sendalot; 6986 int32_t sub_from_prr = 0; 6987 volatile int32_t sack_rxmit; 6988 struct rack_sendmap *rsm = NULL; 6989 int32_t tso, mtu, would_have_fin = 0; 6990 struct tcpopt to; 6991 int32_t slot = 0; 6992 uint32_t cts; 6993 uint8_t hpts_calling, doing_tlp = 0; 6994 int32_t do_a_prefetch; 6995 int32_t prefetch_rsm = 0; 6996 int32_t prefetch_so_done = 0; 6997 struct tcp_log_buffer *lgb = NULL; 6998 struct inpcb *inp; 6999 struct sockbuf *sb; 7000 #ifdef INET6 7001 struct ip6_hdr *ip6 = NULL; 7002 int32_t isipv6; 7003 #endif 7004 #ifdef KERN_TLS 7005 const bool hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0; 7006 #else 7007 const bool hw_tls = false; 7008 #endif 7009 7010 /* setup and take the cache hits here */ 7011 rack = (struct tcp_rack *)tp->t_fb_ptr; 7012 inp = rack->rc_inp; 7013 so = inp->inp_socket; 7014 sb = &so->so_snd; 7015 kern_prefetch(sb, &do_a_prefetch); 7016 do_a_prefetch = 1; 7017 7018 INP_WLOCK_ASSERT(inp); 7019 #ifdef TCP_OFFLOAD 7020 if (tp->t_flags & TF_TOE) 7021 return (tcp_offload_output(tp)); 7022 #endif 7023 7024 #ifdef TCP_RFC7413 7025 /* 7026 * For TFO connections in SYN_RECEIVED, only allow the initial 7027 * SYN|ACK and those sent by the retransmit timer. 7028 */ 7029 if ((tp->t_flags & TF_FASTOPEN) && 7030 (tp->t_state == TCPS_SYN_RECEIVED) && 7031 SEQ_GT(tp->snd_max, tp->snd_una) && /* inital SYN|ACK sent */ 7032 (tp->snd_nxt != tp->snd_una)) /* not a retransmit */ 7033 return (0); 7034 #endif 7035 #ifdef INET6 7036 if (rack->r_state) { 7037 /* Use the cache line loaded if possible */ 7038 isipv6 = rack->r_is_v6; 7039 } else { 7040 isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 7041 } 7042 #endif 7043 cts = tcp_ts_getticks(); 7044 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 7045 inp->inp_in_hpts) { 7046 /* 7047 * We are on the hpts for some timer but not hptsi output. 7048 * Remove from the hpts unconditionally. 7049 */ 7050 rack_timer_cancel(tp, rack, cts, __LINE__); 7051 } 7052 /* Mark that we have called rack_output(). */ 7053 if ((rack->r_timer_override) || 7054 (tp->t_flags & TF_FORCEDATA) || 7055 (tp->t_state < TCPS_ESTABLISHED)) { 7056 if (tp->t_inpcb->inp_in_hpts) 7057 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 7058 } else if (tp->t_inpcb->inp_in_hpts) { 7059 /* 7060 * On the hpts you can't pass even if ACKNOW is on, we will 7061 * when the hpts fires. 7062 */ 7063 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 7064 return (0); 7065 } 7066 hpts_calling = inp->inp_hpts_calls; 7067 inp->inp_hpts_calls = 0; 7068 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7069 if (rack_process_timers(tp, rack, cts, hpts_calling)) { 7070 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 7071 return (0); 7072 } 7073 } 7074 rack->r_wanted_output = 0; 7075 rack->r_timer_override = 0; 7076 /* 7077 * Determine length of data that should be transmitted, and flags 7078 * that will be used. If there is some data or critical controls 7079 * (SYN, RST) to send, then transmit; otherwise, investigate 7080 * further. 7081 */ 7082 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 7083 if (tp->t_idle_reduce) { 7084 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 7085 rack_cc_after_idle(tp, 7086 (rack->r_idle_reduce_largest ? 1 :0)); 7087 } 7088 tp->t_flags &= ~TF_LASTIDLE; 7089 if (idle) { 7090 if (tp->t_flags & TF_MORETOCOME) { 7091 tp->t_flags |= TF_LASTIDLE; 7092 idle = 0; 7093 } 7094 } 7095 again: 7096 /* 7097 * If we've recently taken a timeout, snd_max will be greater than 7098 * snd_nxt. There may be SACK information that allows us to avoid 7099 * resending already delivered data. Adjust snd_nxt accordingly. 7100 */ 7101 sendalot = 0; 7102 cts = tcp_ts_getticks(); 7103 tso = 0; 7104 mtu = 0; 7105 sb_offset = tp->snd_max - tp->snd_una; 7106 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 7107 7108 flags = tcp_outflags[tp->t_state]; 7109 /* 7110 * Send any SACK-generated retransmissions. If we're explicitly 7111 * trying to send out new data (when sendalot is 1), bypass this 7112 * function. If we retransmit in fast recovery mode, decrement 7113 * snd_cwnd, since we're replacing a (future) new transmission with 7114 * a retransmission now, and we previously incremented snd_cwnd in 7115 * tcp_input(). 7116 */ 7117 /* 7118 * Still in sack recovery , reset rxmit flag to zero. 7119 */ 7120 while (rack->rc_free_cnt < rack_free_cache) { 7121 rsm = rack_alloc(rack); 7122 if (rsm == NULL) { 7123 if (inp->inp_hpts_calls) 7124 /* Retry in a ms */ 7125 slot = 1; 7126 goto just_return_nolock; 7127 } 7128 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_next); 7129 rack->rc_free_cnt++; 7130 rsm = NULL; 7131 } 7132 if (inp->inp_hpts_calls) 7133 inp->inp_hpts_calls = 0; 7134 sack_rxmit = 0; 7135 len = 0; 7136 rsm = NULL; 7137 if (flags & TH_RST) { 7138 SOCKBUF_LOCK(sb); 7139 goto send; 7140 } 7141 if (rack->r_ctl.rc_tlpsend) { 7142 /* Tail loss probe */ 7143 long cwin; 7144 long tlen; 7145 7146 doing_tlp = 1; 7147 rsm = rack->r_ctl.rc_tlpsend; 7148 rack->r_ctl.rc_tlpsend = NULL; 7149 sack_rxmit = 1; 7150 tlen = rsm->r_end - rsm->r_start; 7151 if (tlen > tp->t_maxseg) 7152 tlen = tp->t_maxseg; 7153 #ifdef INVARIANTS 7154 if (SEQ_GT(tp->snd_una, rsm->r_start)) { 7155 panic("tp:%p rack:%p snd_una:%u rsm:%p r_start:%u", 7156 tp, rack, tp->snd_una, rsm, rsm->r_start); 7157 } 7158 #endif 7159 sb_offset = rsm->r_start - tp->snd_una; 7160 cwin = min(tp->snd_wnd, tlen); 7161 len = cwin; 7162 } else if (rack->r_ctl.rc_resend) { 7163 /* Retransmit timer */ 7164 rsm = rack->r_ctl.rc_resend; 7165 rack->r_ctl.rc_resend = NULL; 7166 len = rsm->r_end - rsm->r_start; 7167 sack_rxmit = 1; 7168 sendalot = 0; 7169 sb_offset = rsm->r_start - tp->snd_una; 7170 if (len >= tp->t_maxseg) { 7171 len = tp->t_maxseg; 7172 } 7173 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d", 7174 __func__, sb_offset)); 7175 } else if ((rack->rc_in_persist == 0) && 7176 ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) { 7177 long tlen; 7178 7179 if ((!IN_RECOVERY(tp->t_flags)) && 7180 ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) { 7181 /* Enter recovery if not induced by a time-out */ 7182 rack->r_ctl.rc_rsm_start = rsm->r_start; 7183 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 7184 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 7185 rack_cong_signal(tp, NULL, CC_NDUPACK); 7186 /* 7187 * When we enter recovery we need to assure we send 7188 * one packet. 7189 */ 7190 rack->r_ctl.rc_prr_sndcnt = tp->t_maxseg; 7191 } 7192 #ifdef INVARIANTS 7193 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 7194 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 7195 tp, rack, rsm, rsm->r_start, tp->snd_una); 7196 } 7197 #endif 7198 tlen = rsm->r_end - rsm->r_start; 7199 sb_offset = rsm->r_start - tp->snd_una; 7200 if (tlen > rack->r_ctl.rc_prr_sndcnt) { 7201 len = rack->r_ctl.rc_prr_sndcnt; 7202 } else { 7203 len = tlen; 7204 } 7205 if (len >= tp->t_maxseg) { 7206 sendalot = 1; 7207 len = tp->t_maxseg; 7208 } else { 7209 sendalot = 0; 7210 if ((rack->rc_timer_up == 0) && 7211 (len < tlen)) { 7212 /* 7213 * If its not a timer don't send a partial 7214 * segment. 7215 */ 7216 len = 0; 7217 goto just_return_nolock; 7218 } 7219 } 7220 KASSERT(sb_offset >= 0, ("%s: sack block to the left of una : %d", 7221 __func__, sb_offset)); 7222 if (len > 0) { 7223 sub_from_prr = 1; 7224 sack_rxmit = 1; 7225 TCPSTAT_INC(tcps_sack_rexmits); 7226 TCPSTAT_ADD(tcps_sack_rexmit_bytes, 7227 min(len, tp->t_maxseg)); 7228 counter_u64_add(rack_rtm_prr_retran, 1); 7229 } 7230 } 7231 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 7232 /* we are retransmitting the fin */ 7233 len--; 7234 if (len) { 7235 /* 7236 * When retransmitting data do *not* include the 7237 * FIN. This could happen from a TLP probe. 7238 */ 7239 flags &= ~TH_FIN; 7240 } 7241 } 7242 #ifdef INVARIANTS 7243 /* For debugging */ 7244 rack->r_ctl.rc_rsm_at_retran = rsm; 7245 #endif 7246 /* 7247 * Enforce a connection sendmap count limit if set 7248 * as long as we are not retransmiting. 7249 */ 7250 if ((rsm == NULL) && 7251 (rack_map_entries_limit > 0) && 7252 (rack->r_ctl.rc_num_maps_alloced >= rack_map_entries_limit)) { 7253 counter_u64_add(rack_to_alloc_limited, 1); 7254 if (!rack->alloc_limit_reported) { 7255 rack->alloc_limit_reported = 1; 7256 counter_u64_add(rack_alloc_limited_conns, 1); 7257 } 7258 goto just_return_nolock; 7259 } 7260 /* 7261 * Get standard flags, and add SYN or FIN if requested by 'hidden' 7262 * state flags. 7263 */ 7264 if (tp->t_flags & TF_NEEDFIN) 7265 flags |= TH_FIN; 7266 if (tp->t_flags & TF_NEEDSYN) 7267 flags |= TH_SYN; 7268 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 7269 void *end_rsm; 7270 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 7271 if (end_rsm) 7272 kern_prefetch(end_rsm, &prefetch_rsm); 7273 prefetch_rsm = 1; 7274 } 7275 SOCKBUF_LOCK(sb); 7276 /* 7277 * If in persist timeout with window of 0, send 1 byte. Otherwise, 7278 * if window is small but nonzero and time TF_SENTFIN expired, we 7279 * will send what we can and go to transmit state. 7280 */ 7281 if (tp->t_flags & TF_FORCEDATA) { 7282 if (sendwin == 0) { 7283 /* 7284 * If we still have some data to send, then clear 7285 * the FIN bit. Usually this would happen below 7286 * when it realizes that we aren't sending all the 7287 * data. However, if we have exactly 1 byte of 7288 * unsent data, then it won't clear the FIN bit 7289 * below, and if we are in persist state, we wind up 7290 * sending the packet without recording that we sent 7291 * the FIN bit. 7292 * 7293 * We can't just blindly clear the FIN bit, because 7294 * if we don't have any more data to send then the 7295 * probe will be the FIN itself. 7296 */ 7297 if (sb_offset < sbused(sb)) 7298 flags &= ~TH_FIN; 7299 sendwin = 1; 7300 } else { 7301 if (rack->rc_in_persist) 7302 rack_exit_persist(tp, rack); 7303 /* 7304 * If we are dropping persist mode then we need to 7305 * correct snd_nxt/snd_max and off. 7306 */ 7307 tp->snd_nxt = tp->snd_max; 7308 sb_offset = tp->snd_nxt - tp->snd_una; 7309 } 7310 } 7311 /* 7312 * If snd_nxt == snd_max and we have transmitted a FIN, the 7313 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 7314 * negative length. This can also occur when TCP opens up its 7315 * congestion window while receiving additional duplicate acks after 7316 * fast-retransmit because TCP will reset snd_nxt to snd_max after 7317 * the fast-retransmit. 7318 * 7319 * In the normal retransmit-FIN-only case, however, snd_nxt will be 7320 * set to snd_una, the sb_offset will be 0, and the length may wind 7321 * up 0. 7322 * 7323 * If sack_rxmit is true we are retransmitting from the scoreboard 7324 * in which case len is already set. 7325 */ 7326 if (sack_rxmit == 0) { 7327 uint32_t avail; 7328 7329 avail = sbavail(sb); 7330 if (SEQ_GT(tp->snd_nxt, tp->snd_una)) 7331 sb_offset = tp->snd_nxt - tp->snd_una; 7332 else 7333 sb_offset = 0; 7334 if (IN_RECOVERY(tp->t_flags) == 0) { 7335 if (rack->r_ctl.rc_tlp_new_data) { 7336 /* TLP is forcing out new data */ 7337 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 7338 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 7339 } 7340 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd) 7341 len = tp->snd_wnd; 7342 else 7343 len = rack->r_ctl.rc_tlp_new_data; 7344 rack->r_ctl.rc_tlp_new_data = 0; 7345 doing_tlp = 1; 7346 } else { 7347 if (sendwin > avail) { 7348 /* use the available */ 7349 if (avail > sb_offset) { 7350 len = (int32_t)(avail - sb_offset); 7351 } else { 7352 len = 0; 7353 } 7354 } else { 7355 if (sendwin > sb_offset) { 7356 len = (int32_t)(sendwin - sb_offset); 7357 } else { 7358 len = 0; 7359 } 7360 } 7361 } 7362 } else { 7363 uint32_t outstanding; 7364 7365 /* 7366 * We are inside of a SACK recovery episode and are 7367 * sending new data, having retransmitted all the 7368 * data possible so far in the scoreboard. 7369 */ 7370 outstanding = tp->snd_max - tp->snd_una; 7371 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 7372 if (tp->snd_wnd > outstanding) { 7373 len = tp->snd_wnd - outstanding; 7374 /* Check to see if we have the data */ 7375 if (((sb_offset + len) > avail) && 7376 (avail > sb_offset)) 7377 len = avail - sb_offset; 7378 else 7379 len = 0; 7380 } else 7381 len = 0; 7382 } else if (avail > sb_offset) 7383 len = avail - sb_offset; 7384 else 7385 len = 0; 7386 if (len > 0) { 7387 if (len > rack->r_ctl.rc_prr_sndcnt) 7388 len = rack->r_ctl.rc_prr_sndcnt; 7389 7390 if (len > 0) { 7391 sub_from_prr = 1; 7392 counter_u64_add(rack_rtm_prr_newdata, 1); 7393 } 7394 } 7395 if (len > tp->t_maxseg) { 7396 /* 7397 * We should never send more than a MSS when 7398 * retransmitting or sending new data in prr 7399 * mode unless the override flag is on. Most 7400 * likely the PRR algorithm is not going to 7401 * let us send a lot as well :-) 7402 */ 7403 if (rack->r_ctl.rc_prr_sendalot == 0) 7404 len = tp->t_maxseg; 7405 } else if (len < tp->t_maxseg) { 7406 /* 7407 * Do we send any? The idea here is if the 7408 * send empty's the socket buffer we want to 7409 * do it. However if not then lets just wait 7410 * for our prr_sndcnt to get bigger. 7411 */ 7412 long leftinsb; 7413 7414 leftinsb = sbavail(sb) - sb_offset; 7415 if (leftinsb > len) { 7416 /* This send does not empty the sb */ 7417 len = 0; 7418 } 7419 } 7420 } 7421 } 7422 if (prefetch_so_done == 0) { 7423 kern_prefetch(so, &prefetch_so_done); 7424 prefetch_so_done = 1; 7425 } 7426 /* 7427 * Lop off SYN bit if it has already been sent. However, if this is 7428 * SYN-SENT state and if segment contains data and if we don't know 7429 * that foreign host supports TAO, suppress sending segment. 7430 */ 7431 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) { 7432 if ((tp->t_state != TCPS_SYN_RECEIVED) && 7433 (tp->t_state != TCPS_SYN_SENT)) 7434 flags &= ~TH_SYN; 7435 #ifdef TCP_RFC7413 7436 /* 7437 * When sending additional segments following a TFO SYN|ACK, 7438 * do not include the SYN bit. 7439 */ 7440 if ((tp->t_flags & TF_FASTOPEN) && 7441 (tp->t_state == TCPS_SYN_RECEIVED)) 7442 flags &= ~TH_SYN; 7443 #endif 7444 } 7445 /* 7446 * Be careful not to send data and/or FIN on SYN segments. This 7447 * measure is needed to prevent interoperability problems with not 7448 * fully conformant TCP implementations. 7449 */ 7450 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 7451 len = 0; 7452 flags &= ~TH_FIN; 7453 } 7454 #ifdef TCP_RFC7413 7455 /* 7456 * When retransmitting SYN|ACK on a passively-created TFO socket, 7457 * don't include data, as the presence of data may have caused the 7458 * original SYN|ACK to have been dropped by a middlebox. 7459 */ 7460 if ((tp->t_flags & TF_FASTOPEN) && 7461 ((tp->t_state == TCPS_SYN_RECEIVED) && (tp->t_rxtshift > 0))) 7462 len = 0; 7463 #endif 7464 if (len <= 0) { 7465 /* 7466 * If FIN has been sent but not acked, but we haven't been 7467 * called to retransmit, len will be < 0. Otherwise, window 7468 * shrank after we sent into it. If window shrank to 0, 7469 * cancel pending retransmit, pull snd_nxt back to (closed) 7470 * window, and set the persist timer if it isn't already 7471 * going. If the window didn't close completely, just wait 7472 * for an ACK. 7473 * 7474 * We also do a general check here to ensure that we will 7475 * set the persist timer when we have data to send, but a 7476 * 0-byte window. This makes sure the persist timer is set 7477 * even if the packet hits one of the "goto send" lines 7478 * below. 7479 */ 7480 len = 0; 7481 if ((tp->snd_wnd == 0) && 7482 (TCPS_HAVEESTABLISHED(tp->t_state)) && 7483 (sb_offset < (int)sbavail(sb))) { 7484 tp->snd_nxt = tp->snd_una; 7485 rack_enter_persist(tp, rack, cts); 7486 } 7487 } 7488 /* len will be >= 0 after this point. */ 7489 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 7490 tcp_sndbuf_autoscale(tp, so, sendwin); 7491 /* 7492 * Decide if we can use TCP Segmentation Offloading (if supported by 7493 * hardware). 7494 * 7495 * TSO may only be used if we are in a pure bulk sending state. The 7496 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 7497 * options prevent using TSO. With TSO the TCP header is the same 7498 * (except for the sequence number) for all generated packets. This 7499 * makes it impossible to transmit any options which vary per 7500 * generated segment or packet. 7501 * 7502 * IPv4 handling has a clear separation of ip options and ip header 7503 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 7504 * the right thing below to provide length of just ip options and thus 7505 * checking for ipoptlen is enough to decide if ip options are present. 7506 */ 7507 7508 #ifdef INET6 7509 if (isipv6) 7510 ipoptlen = ip6_optlen(tp->t_inpcb); 7511 else 7512 #endif 7513 if (tp->t_inpcb->inp_options) 7514 ipoptlen = tp->t_inpcb->inp_options->m_len - 7515 offsetof(struct ipoption, ipopt_list); 7516 else 7517 ipoptlen = 0; 7518 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 7519 /* 7520 * Pre-calculate here as we save another lookup into the darknesses 7521 * of IPsec that way and can actually decide if TSO is ok. 7522 */ 7523 #ifdef INET6 7524 if (isipv6 && IPSEC_ENABLED(ipv6)) 7525 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 7526 #ifdef INET 7527 else 7528 #endif 7529 #endif /* INET6 */ 7530 #ifdef INET 7531 if (IPSEC_ENABLED(ipv4)) 7532 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 7533 #endif /* INET */ 7534 #endif 7535 7536 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 7537 ipoptlen += ipsec_optlen; 7538 #endif 7539 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg && 7540 #ifdef NETFLIX_TCP_O_UDP 7541 (tp->t_port == 0) && 7542 #endif 7543 ((tp->t_flags & TF_SIGNATURE) == 0) && 7544 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 7545 ipoptlen == 0) 7546 tso = 1; 7547 { 7548 uint32_t outstanding; 7549 7550 outstanding = tp->snd_max - tp->snd_una; 7551 if (tp->t_flags & TF_SENTFIN) { 7552 /* 7553 * If we sent a fin, snd_max is 1 higher than 7554 * snd_una 7555 */ 7556 outstanding--; 7557 } 7558 if (outstanding > 0) { 7559 /* 7560 * This is sub-optimal. We only send a stand alone 7561 * FIN on its own segment. 7562 */ 7563 if (flags & TH_FIN) { 7564 flags &= ~TH_FIN; 7565 would_have_fin = 1; 7566 } 7567 } else if (sack_rxmit) { 7568 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 7569 flags &= ~TH_FIN; 7570 } else { 7571 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 7572 sbused(sb))) 7573 flags &= ~TH_FIN; 7574 } 7575 } 7576 recwin = sbspace(&so->so_rcv); 7577 7578 /* 7579 * Sender silly window avoidance. We transmit under the following 7580 * conditions when len is non-zero: 7581 * 7582 * - We have a full segment (or more with TSO) - This is the last 7583 * buffer in a write()/send() and we are either idle or running 7584 * NODELAY - we've timed out (e.g. persist timer) - we have more 7585 * then 1/2 the maximum send window's worth of data (receiver may be 7586 * limited the window size) - we need to retransmit 7587 */ 7588 if (len) { 7589 if (len >= tp->t_maxseg) { 7590 pass = 1; 7591 goto send; 7592 } 7593 /* 7594 * NOTE! on localhost connections an 'ack' from the remote 7595 * end may occur synchronously with the output and cause us 7596 * to flush a buffer queued with moretocome. XXX 7597 * 7598 */ 7599 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 7600 (idle || (tp->t_flags & TF_NODELAY)) && 7601 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) && 7602 (tp->t_flags & TF_NOPUSH) == 0) { 7603 pass = 2; 7604 goto send; 7605 } 7606 if (tp->t_flags & TF_FORCEDATA) { /* typ. timeout case */ 7607 pass = 3; 7608 goto send; 7609 } 7610 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 7611 goto send; 7612 } 7613 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 7614 pass = 4; 7615 goto send; 7616 } 7617 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 7618 pass = 5; 7619 goto send; 7620 } 7621 if (sack_rxmit) { 7622 pass = 6; 7623 goto send; 7624 } 7625 } 7626 /* 7627 * Sending of standalone window updates. 7628 * 7629 * Window updates are important when we close our window due to a 7630 * full socket buffer and are opening it again after the application 7631 * reads data from it. Once the window has opened again and the 7632 * remote end starts to send again the ACK clock takes over and 7633 * provides the most current window information. 7634 * 7635 * We must avoid the silly window syndrome whereas every read from 7636 * the receive buffer, no matter how small, causes a window update 7637 * to be sent. We also should avoid sending a flurry of window 7638 * updates when the socket buffer had queued a lot of data and the 7639 * application is doing small reads. 7640 * 7641 * Prevent a flurry of pointless window updates by only sending an 7642 * update when we can increase the advertized window by more than 7643 * 1/4th of the socket buffer capacity. When the buffer is getting 7644 * full or is very small be more aggressive and send an update 7645 * whenever we can increase by two mss sized segments. In all other 7646 * situations the ACK's to new incoming data will carry further 7647 * window increases. 7648 * 7649 * Don't send an independent window update if a delayed ACK is 7650 * pending (it will get piggy-backed on it) or the remote side 7651 * already has done a half-close and won't send more data. Skip 7652 * this if the connection is in T/TCP half-open state. 7653 */ 7654 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 7655 !(tp->t_flags & TF_DELACK) && 7656 !TCPS_HAVERCVDFIN(tp->t_state)) { 7657 /* 7658 * "adv" is the amount we could increase the window, taking 7659 * into account that we are limited by TCP_MAXWIN << 7660 * tp->rcv_scale. 7661 */ 7662 int32_t adv; 7663 int oldwin; 7664 7665 adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale); 7666 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 7667 oldwin = (tp->rcv_adv - tp->rcv_nxt); 7668 adv -= oldwin; 7669 } else 7670 oldwin = 0; 7671 7672 /* 7673 * If the new window size ends up being the same as the old 7674 * size when it is scaled, then don't force a window update. 7675 */ 7676 if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale) 7677 goto dontupdate; 7678 7679 if (adv >= (int32_t)(2 * tp->t_maxseg) && 7680 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 7681 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 7682 so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg)) { 7683 pass = 7; 7684 goto send; 7685 } 7686 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) 7687 goto send; 7688 } 7689 dontupdate: 7690 7691 /* 7692 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 7693 * is also a catch-all for the retransmit timer timeout case. 7694 */ 7695 if (tp->t_flags & TF_ACKNOW) { 7696 pass = 8; 7697 goto send; 7698 } 7699 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 7700 pass = 9; 7701 goto send; 7702 } 7703 if (SEQ_GT(tp->snd_up, tp->snd_una)) { 7704 pass = 10; 7705 goto send; 7706 } 7707 /* 7708 * If our state indicates that FIN should be sent and we have not 7709 * yet done so, then we need to send. 7710 */ 7711 if (flags & TH_FIN) { 7712 if ((tp->t_flags & TF_SENTFIN) || 7713 (((tp->t_flags & TF_SENTFIN) == 0) && 7714 (tp->snd_nxt == tp->snd_una))) { 7715 pass = 11; 7716 goto send; 7717 } 7718 } 7719 /* 7720 * No reason to send a segment, just return. 7721 */ 7722 just_return: 7723 SOCKBUF_UNLOCK(sb); 7724 just_return_nolock: 7725 if (tot_len_this_send == 0) 7726 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 7727 rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1); 7728 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling); 7729 tp->t_flags &= ~TF_FORCEDATA; 7730 return (0); 7731 7732 send: 7733 if (doing_tlp == 0) { 7734 /* 7735 * Data not a TLP, and its not the rxt firing. If it is the 7736 * rxt firing, we want to leave the tlp_in_progress flag on 7737 * so we don't send another TLP. It has to be a rack timer 7738 * or normal send (response to acked data) to clear the tlp 7739 * in progress flag. 7740 */ 7741 rack->rc_tlp_in_progress = 0; 7742 } 7743 SOCKBUF_LOCK_ASSERT(sb); 7744 if (len > 0) { 7745 if (len >= tp->t_maxseg) 7746 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 7747 else 7748 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 7749 } 7750 /* 7751 * Before ESTABLISHED, force sending of initial options unless TCP 7752 * set not to do any options. NOTE: we assume that the IP/TCP header 7753 * plus TCP options always fit in a single mbuf, leaving room for a 7754 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 7755 * + optlen <= MCLBYTES 7756 */ 7757 optlen = 0; 7758 #ifdef INET6 7759 if (isipv6) 7760 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 7761 else 7762 #endif 7763 hdrlen = sizeof(struct tcpiphdr); 7764 7765 /* 7766 * Compute options for segment. We only have to care about SYN and 7767 * established connection segments. Options for SYN-ACK segments 7768 * are handled in TCP syncache. 7769 */ 7770 to.to_flags = 0; 7771 if ((tp->t_flags & TF_NOOPT) == 0) { 7772 /* Maximum segment size. */ 7773 if (flags & TH_SYN) { 7774 tp->snd_nxt = tp->iss; 7775 to.to_mss = tcp_mssopt(&inp->inp_inc); 7776 #ifdef NETFLIX_TCP_O_UDP 7777 if (tp->t_port) 7778 to.to_mss -= V_tcp_udp_tunneling_overhead; 7779 #endif 7780 to.to_flags |= TOF_MSS; 7781 #ifdef TCP_RFC7413 7782 /* 7783 * Only include the TFO option on the first 7784 * transmission of the SYN|ACK on a 7785 * passively-created TFO socket, as the presence of 7786 * the TFO option may have caused the original 7787 * SYN|ACK to have been dropped by a middlebox. 7788 */ 7789 if ((tp->t_flags & TF_FASTOPEN) && 7790 (tp->t_state == TCPS_SYN_RECEIVED) && 7791 (tp->t_rxtshift == 0)) { 7792 to.to_tfo_len = TCP_FASTOPEN_MAX_COOKIE_LEN; 7793 to.to_tfo_cookie = (u_char *)&tp->t_tfo_cookie; 7794 to.to_flags |= TOF_FASTOPEN; 7795 } 7796 #endif 7797 } 7798 /* Window scaling. */ 7799 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 7800 to.to_wscale = tp->request_r_scale; 7801 to.to_flags |= TOF_SCALE; 7802 } 7803 /* Timestamps. */ 7804 if ((tp->t_flags & TF_RCVD_TSTMP) || 7805 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 7806 to.to_tsval = cts + tp->ts_offset; 7807 to.to_tsecr = tp->ts_recent; 7808 to.to_flags |= TOF_TS; 7809 } 7810 /* Set receive buffer autosizing timestamp. */ 7811 if (tp->rfbuf_ts == 0 && 7812 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 7813 tp->rfbuf_ts = tcp_ts_getticks(); 7814 /* Selective ACK's. */ 7815 if (flags & TH_SYN) 7816 to.to_flags |= TOF_SACKPERM; 7817 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 7818 tp->rcv_numsacks > 0) { 7819 to.to_flags |= TOF_SACK; 7820 to.to_nsacks = tp->rcv_numsacks; 7821 to.to_sacks = (u_char *)tp->sackblks; 7822 } 7823 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 7824 /* TCP-MD5 (RFC2385). */ 7825 if (tp->t_flags & TF_SIGNATURE) 7826 to.to_flags |= TOF_SIGNATURE; 7827 #endif /* TCP_SIGNATURE */ 7828 7829 /* Processing the options. */ 7830 hdrlen += optlen = tcp_addoptions(&to, opt); 7831 } 7832 #ifdef NETFLIX_TCP_O_UDP 7833 if (tp->t_port) { 7834 if (V_tcp_udp_tunneling_port == 0) { 7835 /* The port was removed?? */ 7836 SOCKBUF_UNLOCK(&so->so_snd); 7837 return (EHOSTUNREACH); 7838 } 7839 hdrlen += sizeof(struct udphdr); 7840 } 7841 #endif 7842 ipoptlen = 0; 7843 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 7844 ipoptlen += ipsec_optlen; 7845 #endif 7846 7847 /* 7848 * Adjust data length if insertion of options will bump the packet 7849 * length beyond the t_maxseg length. Clear the FIN bit because we 7850 * cut off the tail of the segment. 7851 */ 7852 if (len + optlen + ipoptlen > tp->t_maxseg) { 7853 if (flags & TH_FIN) { 7854 would_have_fin = 1; 7855 flags &= ~TH_FIN; 7856 } 7857 if (tso) { 7858 uint32_t if_hw_tsomax; 7859 uint32_t moff; 7860 int32_t max_len; 7861 7862 /* extract TSO information */ 7863 if_hw_tsomax = tp->t_tsomax; 7864 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 7865 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 7866 KASSERT(ipoptlen == 0, 7867 ("%s: TSO can't do IP options", __func__)); 7868 7869 /* 7870 * Check if we should limit by maximum payload 7871 * length: 7872 */ 7873 if (if_hw_tsomax != 0) { 7874 /* compute maximum TSO length */ 7875 max_len = (if_hw_tsomax - hdrlen - 7876 max_linkhdr); 7877 if (max_len <= 0) { 7878 len = 0; 7879 } else if (len > max_len) { 7880 sendalot = 1; 7881 len = max_len; 7882 } 7883 } 7884 /* 7885 * Prevent the last segment from being fractional 7886 * unless the send sockbuf can be emptied: 7887 */ 7888 max_len = (tp->t_maxseg - optlen); 7889 if ((sb_offset + len) < sbavail(sb)) { 7890 moff = len % (u_int)max_len; 7891 if (moff != 0) { 7892 len -= moff; 7893 sendalot = 1; 7894 } 7895 } 7896 /* 7897 * In case there are too many small fragments don't 7898 * use TSO: 7899 */ 7900 if (len <= max_len) { 7901 len = max_len; 7902 sendalot = 1; 7903 tso = 0; 7904 } 7905 /* 7906 * Send the FIN in a separate segment after the bulk 7907 * sending is done. We don't trust the TSO 7908 * implementations to clear the FIN flag on all but 7909 * the last segment. 7910 */ 7911 if (tp->t_flags & TF_NEEDFIN) 7912 sendalot = 1; 7913 7914 } else { 7915 len = tp->t_maxseg - optlen - ipoptlen; 7916 sendalot = 1; 7917 } 7918 } else 7919 tso = 0; 7920 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 7921 ("%s: len > IP_MAXPACKET", __func__)); 7922 #ifdef DIAGNOSTIC 7923 #ifdef INET6 7924 if (max_linkhdr + hdrlen > MCLBYTES) 7925 #else 7926 if (max_linkhdr + hdrlen > MHLEN) 7927 #endif 7928 panic("tcphdr too big"); 7929 #endif 7930 7931 /* 7932 * This KASSERT is here to catch edge cases at a well defined place. 7933 * Before, those had triggered (random) panic conditions further 7934 * down. 7935 */ 7936 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 7937 if ((len == 0) && 7938 (flags & TH_FIN) && 7939 (sbused(sb))) { 7940 /* 7941 * We have outstanding data, don't send a fin by itself!. 7942 */ 7943 goto just_return; 7944 } 7945 /* 7946 * Grab a header mbuf, attaching a copy of data to be transmitted, 7947 * and initialize the header from the template for sends on this 7948 * connection. 7949 */ 7950 if (len) { 7951 uint32_t max_val; 7952 uint32_t moff; 7953 7954 if (rack->rc_pace_max_segs) 7955 max_val = rack->rc_pace_max_segs * tp->t_maxseg; 7956 else 7957 max_val = len; 7958 /* 7959 * We allow a limit on sending with hptsi. 7960 */ 7961 if (len > max_val) { 7962 len = max_val; 7963 } 7964 #ifdef INET6 7965 if (MHLEN < hdrlen + max_linkhdr) 7966 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 7967 else 7968 #endif 7969 m = m_gethdr(M_NOWAIT, MT_DATA); 7970 7971 if (m == NULL) { 7972 SOCKBUF_UNLOCK(sb); 7973 error = ENOBUFS; 7974 sack_rxmit = 0; 7975 goto out; 7976 } 7977 m->m_data += max_linkhdr; 7978 m->m_len = hdrlen; 7979 7980 /* 7981 * Start the m_copy functions from the closest mbuf to the 7982 * sb_offset in the socket buffer chain. 7983 */ 7984 mb = sbsndptr_noadv(sb, sb_offset, &moff); 7985 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 7986 m_copydata(mb, moff, (int)len, 7987 mtod(m, caddr_t)+hdrlen); 7988 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 7989 sbsndptr_adv(sb, mb, len); 7990 m->m_len += len; 7991 } else { 7992 struct sockbuf *msb; 7993 7994 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 7995 msb = NULL; 7996 else 7997 msb = sb; 7998 m->m_next = tcp_m_copym(/*tp, */ mb, moff, &len, 7999 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 8000 hw_tls /*, NULL */); 8001 if (len <= (tp->t_maxseg - optlen)) { 8002 /* 8003 * Must have ran out of mbufs for the copy 8004 * shorten it to no longer need tso. Lets 8005 * not put on sendalot since we are low on 8006 * mbufs. 8007 */ 8008 tso = 0; 8009 } 8010 if (m->m_next == NULL) { 8011 SOCKBUF_UNLOCK(sb); 8012 (void)m_free(m); 8013 error = ENOBUFS; 8014 sack_rxmit = 0; 8015 goto out; 8016 } 8017 } 8018 if ((tp->t_flags & TF_FORCEDATA) && len == 1) { 8019 TCPSTAT_INC(tcps_sndprobe); 8020 #ifdef NETFLIX_STATS 8021 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 8022 stats_voi_update_abs_u32(tp->t_stats, 8023 VOI_TCP_RETXPB, len); 8024 else 8025 stats_voi_update_abs_u64(tp->t_stats, 8026 VOI_TCP_TXPB, len); 8027 #endif 8028 } else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 8029 if (rsm && (rsm->r_flags & RACK_TLP)) { 8030 /* 8031 * TLP should not count in retran count, but 8032 * in its own bin 8033 */ 8034 /* tp->t_sndtlppack++;*/ 8035 /* tp->t_sndtlpbyte += len;*/ 8036 counter_u64_add(rack_tlp_retran, 1); 8037 counter_u64_add(rack_tlp_retran_bytes, len); 8038 } else { 8039 tp->t_sndrexmitpack++; 8040 TCPSTAT_INC(tcps_sndrexmitpack); 8041 TCPSTAT_ADD(tcps_sndrexmitbyte, len); 8042 } 8043 #ifdef NETFLIX_STATS 8044 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 8045 len); 8046 #endif 8047 } else { 8048 TCPSTAT_INC(tcps_sndpack); 8049 TCPSTAT_ADD(tcps_sndbyte, len); 8050 #ifdef NETFLIX_STATS 8051 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 8052 len); 8053 #endif 8054 } 8055 /* 8056 * If we're sending everything we've got, set PUSH. (This 8057 * will keep happy those implementations which only give 8058 * data to the user when a buffer fills or a PUSH comes in.) 8059 */ 8060 if (sb_offset + len == sbused(sb) && 8061 sbused(sb) && 8062 !(flags & TH_SYN)) 8063 flags |= TH_PUSH; 8064 8065 /* 8066 * Are we doing hptsi, if so we must calculate the slot. We 8067 * only do hptsi in ESTABLISHED and with no RESET being 8068 * sent where we have data to send. 8069 */ 8070 if (((tp->t_state == TCPS_ESTABLISHED) || 8071 (tp->t_state == TCPS_CLOSE_WAIT) || 8072 ((tp->t_state == TCPS_FIN_WAIT_1) && 8073 ((tp->t_flags & TF_SENTFIN) == 0) && 8074 ((flags & TH_FIN) == 0))) && 8075 ((flags & TH_RST) == 0) && 8076 (rack->rc_always_pace)) { 8077 /* 8078 * We use the most optimistic possible cwnd/srtt for 8079 * sending calculations. This will make our 8080 * calculation anticipate getting more through 8081 * quicker then possible. But thats ok we don't want 8082 * the peer to have a gap in data sending. 8083 */ 8084 uint32_t srtt, cwnd, tr_perms = 0; 8085 8086 if (rack->r_ctl.rc_rack_min_rtt) 8087 srtt = rack->r_ctl.rc_rack_min_rtt; 8088 else 8089 srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT)); 8090 if (rack->r_ctl.rc_rack_largest_cwnd) 8091 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 8092 else 8093 cwnd = tp->snd_cwnd; 8094 tr_perms = cwnd / srtt; 8095 if (tr_perms == 0) { 8096 tr_perms = tp->t_maxseg; 8097 } 8098 tot_len_this_send += len; 8099 /* 8100 * Calculate how long this will take to drain, if 8101 * the calculation comes out to zero, thats ok we 8102 * will use send_a_lot to possibly spin around for 8103 * more increasing tot_len_this_send to the point 8104 * that its going to require a pace, or we hit the 8105 * cwnd. Which in that case we are just waiting for 8106 * a ACK. 8107 */ 8108 slot = tot_len_this_send / tr_perms; 8109 /* Now do we reduce the time so we don't run dry? */ 8110 if (slot && rack->rc_pace_reduce) { 8111 int32_t reduce; 8112 8113 reduce = (slot / rack->rc_pace_reduce); 8114 if (reduce < slot) { 8115 slot -= reduce; 8116 } else 8117 slot = 0; 8118 } 8119 if (rack->r_enforce_min_pace && 8120 (slot == 0) && 8121 (tot_len_this_send >= (rack->r_min_pace_seg_thresh * tp->t_maxseg))) { 8122 /* We are enforcing a minimum pace time of 1ms */ 8123 slot = rack->r_enforce_min_pace; 8124 } 8125 } 8126 SOCKBUF_UNLOCK(sb); 8127 } else { 8128 SOCKBUF_UNLOCK(sb); 8129 if (tp->t_flags & TF_ACKNOW) 8130 TCPSTAT_INC(tcps_sndacks); 8131 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 8132 TCPSTAT_INC(tcps_sndctrl); 8133 else if (SEQ_GT(tp->snd_up, tp->snd_una)) 8134 TCPSTAT_INC(tcps_sndurg); 8135 else 8136 TCPSTAT_INC(tcps_sndwinup); 8137 8138 m = m_gethdr(M_NOWAIT, MT_DATA); 8139 if (m == NULL) { 8140 error = ENOBUFS; 8141 sack_rxmit = 0; 8142 goto out; 8143 } 8144 #ifdef INET6 8145 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 8146 MHLEN >= hdrlen) { 8147 M_ALIGN(m, hdrlen); 8148 } else 8149 #endif 8150 m->m_data += max_linkhdr; 8151 m->m_len = hdrlen; 8152 } 8153 SOCKBUF_UNLOCK_ASSERT(sb); 8154 m->m_pkthdr.rcvif = (struct ifnet *)0; 8155 #ifdef MAC 8156 mac_inpcb_create_mbuf(inp, m); 8157 #endif 8158 #ifdef INET6 8159 if (isipv6) { 8160 ip6 = mtod(m, struct ip6_hdr *); 8161 #ifdef NETFLIX_TCP_O_UDP 8162 if (tp->t_port) { 8163 udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr)); 8164 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 8165 udp->uh_dport = tp->t_port; 8166 ulen = hdrlen + len - sizeof(struct ip6_hdr); 8167 udp->uh_ulen = htons(ulen); 8168 th = (struct tcphdr *)(udp + 1); 8169 } else 8170 #endif 8171 th = (struct tcphdr *)(ip6 + 1); 8172 tcpip_fillheaders(inp, /*tp->t_port, */ ip6, th); 8173 } else 8174 #endif /* INET6 */ 8175 { 8176 ip = mtod(m, struct ip *); 8177 #ifdef TCPDEBUG 8178 ipov = (struct ipovly *)ip; 8179 #endif 8180 #ifdef NETFLIX_TCP_O_UDP 8181 if (tp->t_port) { 8182 udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip)); 8183 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 8184 udp->uh_dport = tp->t_port; 8185 ulen = hdrlen + len - sizeof(struct ip); 8186 udp->uh_ulen = htons(ulen); 8187 th = (struct tcphdr *)(udp + 1); 8188 } else 8189 #endif 8190 th = (struct tcphdr *)(ip + 1); 8191 tcpip_fillheaders(inp,/*tp->t_port, */ ip, th); 8192 } 8193 /* 8194 * Fill in fields, remembering maximum advertised window for use in 8195 * delaying messages about window sizes. If resending a FIN, be sure 8196 * not to use a new sequence number. 8197 */ 8198 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 8199 tp->snd_nxt == tp->snd_max) 8200 tp->snd_nxt--; 8201 /* 8202 * If we are starting a connection, send ECN setup SYN packet. If we 8203 * are on a retransmit, we may resend those bits a number of times 8204 * as per RFC 3168. 8205 */ 8206 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 8207 if (tp->t_rxtshift >= 1) { 8208 if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 8209 flags |= TH_ECE | TH_CWR; 8210 } else 8211 flags |= TH_ECE | TH_CWR; 8212 } 8213 if (tp->t_state == TCPS_ESTABLISHED && 8214 (tp->t_flags & TF_ECN_PERMIT)) { 8215 /* 8216 * If the peer has ECN, mark data packets with ECN capable 8217 * transmission (ECT). Ignore pure ack packets, 8218 * retransmissions and window probes. 8219 */ 8220 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 8221 !((tp->t_flags & TF_FORCEDATA) && len == 1)) { 8222 #ifdef INET6 8223 if (isipv6) 8224 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 8225 else 8226 #endif 8227 ip->ip_tos |= IPTOS_ECN_ECT0; 8228 TCPSTAT_INC(tcps_ecn_ect0); 8229 } 8230 /* 8231 * Reply with proper ECN notifications. 8232 */ 8233 if (tp->t_flags & TF_ECN_SND_CWR) { 8234 flags |= TH_CWR; 8235 tp->t_flags &= ~TF_ECN_SND_CWR; 8236 } 8237 if (tp->t_flags & TF_ECN_SND_ECE) 8238 flags |= TH_ECE; 8239 } 8240 /* 8241 * If we are doing retransmissions, then snd_nxt will not reflect 8242 * the first unsent octet. For ACK only packets, we do not want the 8243 * sequence number of the retransmitted packet, we want the sequence 8244 * number of the next unsent octet. So, if there is no data (and no 8245 * SYN or FIN), use snd_max instead of snd_nxt when filling in 8246 * ti_seq. But if we are in persist state, snd_max might reflect 8247 * one byte beyond the right edge of the window, so use snd_nxt in 8248 * that case, since we know we aren't doing a retransmission. 8249 * (retransmit and persist are mutually exclusive...) 8250 */ 8251 if (sack_rxmit == 0) { 8252 if (len || (flags & (TH_SYN | TH_FIN)) || 8253 rack->rc_in_persist) { 8254 th->th_seq = htonl(tp->snd_nxt); 8255 rack_seq = tp->snd_nxt; 8256 } else if (flags & TH_RST) { 8257 /* 8258 * For a Reset send the last cum ack in sequence 8259 * (this like any other choice may still generate a 8260 * challenge ack, if a ack-update packet is in 8261 * flight). 8262 */ 8263 th->th_seq = htonl(tp->snd_una); 8264 rack_seq = tp->snd_una; 8265 } else { 8266 th->th_seq = htonl(tp->snd_max); 8267 rack_seq = tp->snd_max; 8268 } 8269 } else { 8270 th->th_seq = htonl(rsm->r_start); 8271 rack_seq = rsm->r_start; 8272 } 8273 th->th_ack = htonl(tp->rcv_nxt); 8274 if (optlen) { 8275 bcopy(opt, th + 1, optlen); 8276 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 8277 } 8278 th->th_flags = flags; 8279 /* 8280 * Calculate receive window. Don't shrink window, but avoid silly 8281 * window syndrome. 8282 */ 8283 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 8284 recwin < (long)tp->t_maxseg) 8285 recwin = 0; 8286 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 8287 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 8288 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 8289 if (recwin > (long)TCP_MAXWIN << tp->rcv_scale) 8290 recwin = (long)TCP_MAXWIN << tp->rcv_scale; 8291 8292 /* 8293 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 8294 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 8295 * handled in syncache. 8296 */ 8297 if (flags & TH_SYN) 8298 th->th_win = htons((u_short) 8299 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 8300 else 8301 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 8302 /* 8303 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 8304 * window. This may cause the remote transmitter to stall. This 8305 * flag tells soreceive() to disable delayed acknowledgements when 8306 * draining the buffer. This can occur if the receiver is 8307 * attempting to read more data than can be buffered prior to 8308 * transmitting on the connection. 8309 */ 8310 if (th->th_win == 0) { 8311 tp->t_sndzerowin++; 8312 tp->t_flags |= TF_RXWIN0SENT; 8313 } else 8314 tp->t_flags &= ~TF_RXWIN0SENT; 8315 if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { 8316 th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); 8317 th->th_flags |= TH_URG; 8318 } else 8319 /* 8320 * If no urgent pointer to send, then we pull the urgent 8321 * pointer to the left edge of the send window so that it 8322 * doesn't drift into the send window on sequence number 8323 * wraparound. 8324 */ 8325 tp->snd_up = tp->snd_una; /* drag it along */ 8326 8327 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 8328 if (to.to_flags & TOF_SIGNATURE) { 8329 /* 8330 * Calculate MD5 signature and put it into the place 8331 * determined before. 8332 * NOTE: since TCP options buffer doesn't point into 8333 * mbuf's data, calculate offset and use it. 8334 */ 8335 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 8336 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 8337 /* 8338 * Do not send segment if the calculation of MD5 8339 * digest has failed. 8340 */ 8341 goto out; 8342 } 8343 } 8344 #endif 8345 8346 /* 8347 * Put TCP length in extended header, and then checksum extended 8348 * header and data. 8349 */ 8350 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 8351 #ifdef INET6 8352 if (isipv6) { 8353 /* 8354 * ip6_plen is not need to be filled now, and will be filled 8355 * in ip6_output. 8356 */ 8357 #ifdef NETFLIX_TCP_O_UDP 8358 if (tp->t_port) { 8359 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 8360 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 8361 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 8362 th->th_sum = htons(0); 8363 UDPSTAT_INC(udps_opackets); 8364 } else { 8365 #endif 8366 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 8367 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 8368 th->th_sum = in6_cksum_pseudo(ip6, 8369 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 8370 0); 8371 #ifdef NETFLIX_TCP_O_UDP 8372 } 8373 #endif 8374 } 8375 #endif 8376 #if defined(INET6) && defined(INET) 8377 else 8378 #endif 8379 #ifdef INET 8380 { 8381 #ifdef NETFLIX_TCP_O_UDP 8382 if (tp->t_port) { 8383 m->m_pkthdr.csum_flags = CSUM_UDP; 8384 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 8385 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 8386 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 8387 th->th_sum = htons(0); 8388 UDPSTAT_INC(udps_opackets); 8389 } else { 8390 #endif 8391 m->m_pkthdr.csum_flags = CSUM_TCP; 8392 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 8393 th->th_sum = in_pseudo(ip->ip_src.s_addr, 8394 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 8395 IPPROTO_TCP + len + optlen)); 8396 #ifdef NETFLIX_TCP_O_UDP 8397 } 8398 #endif 8399 /* IP version must be set here for ipv4/ipv6 checking later */ 8400 KASSERT(ip->ip_v == IPVERSION, 8401 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 8402 } 8403 #endif 8404 8405 /* 8406 * Enable TSO and specify the size of the segments. The TCP pseudo 8407 * header checksum is always provided. XXX: Fixme: This is currently 8408 * not the case for IPv6. 8409 */ 8410 if (tso) { 8411 KASSERT(len > tp->t_maxseg - optlen, 8412 ("%s: len <= tso_segsz", __func__)); 8413 m->m_pkthdr.csum_flags |= CSUM_TSO; 8414 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 8415 } 8416 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 8417 KASSERT(len + hdrlen + ipoptlen - ipsec_optlen == m_length(m, NULL), 8418 ("%s: mbuf chain shorter than expected: %d + %u + %u - %u != %u", 8419 __func__, len, hdrlen, ipoptlen, ipsec_optlen, m_length(m, NULL))); 8420 #else 8421 KASSERT(len + hdrlen + ipoptlen == m_length(m, NULL), 8422 ("%s: mbuf chain shorter than expected: %d + %u + %u != %u", 8423 __func__, len, hdrlen, ipoptlen, m_length(m, NULL))); 8424 #endif 8425 8426 #ifdef TCP_HHOOK 8427 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 8428 hhook_run_tcp_est_out(tp, th, &to, len, tso); 8429 #endif 8430 8431 #ifdef TCPDEBUG 8432 /* 8433 * Trace. 8434 */ 8435 if (so->so_options & SO_DEBUG) { 8436 u_short save = 0; 8437 8438 #ifdef INET6 8439 if (!isipv6) 8440 #endif 8441 { 8442 save = ipov->ih_len; 8443 ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + 8444 * (th->th_off << 2) */ ); 8445 } 8446 tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); 8447 #ifdef INET6 8448 if (!isipv6) 8449 #endif 8450 ipov->ih_len = save; 8451 } 8452 #endif /* TCPDEBUG */ 8453 8454 /* We're getting ready to send; log now. */ 8455 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 8456 union tcp_log_stackspecific log; 8457 8458 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 8459 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 8460 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 8461 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 8462 if (rsm || sack_rxmit) { 8463 log.u_bbr.flex8 = 1; 8464 } else { 8465 log.u_bbr.flex8 = 0; 8466 } 8467 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 8468 len, &log, false, NULL, NULL, 0, NULL); 8469 } else 8470 lgb = NULL; 8471 8472 /* 8473 * Fill in IP length and desired time to live and send to IP level. 8474 * There should be a better way to handle ttl and tos; we could keep 8475 * them in the template, but need a way to checksum without them. 8476 */ 8477 /* 8478 * m->m_pkthdr.len should have been set before cksum calcuration, 8479 * because in6_cksum() need it. 8480 */ 8481 #ifdef INET6 8482 if (isipv6) { 8483 /* 8484 * we separately set hoplimit for every segment, since the 8485 * user might want to change the value via setsockopt. Also, 8486 * desired default hop limit might be changed via Neighbor 8487 * Discovery. 8488 */ 8489 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 8490 8491 /* 8492 * Set the packet size here for the benefit of DTrace 8493 * probes. ip6_output() will set it properly; it's supposed 8494 * to include the option header lengths as well. 8495 */ 8496 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 8497 8498 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 8499 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 8500 else 8501 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8502 8503 if (tp->t_state == TCPS_SYN_SENT) 8504 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 8505 8506 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 8507 /* TODO: IPv6 IP6TOS_ECT bit on */ 8508 error = ip6_output(m, tp->t_inpcb->in6p_outputopts, 8509 &inp->inp_route6, 8510 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 8511 NULL, NULL, inp); 8512 8513 if (error == EMSGSIZE && inp->inp_route6.ro_rt != NULL) 8514 mtu = inp->inp_route6.ro_rt->rt_mtu; 8515 } 8516 #endif /* INET6 */ 8517 #if defined(INET) && defined(INET6) 8518 else 8519 #endif 8520 #ifdef INET 8521 { 8522 ip->ip_len = htons(m->m_pkthdr.len); 8523 #ifdef INET6 8524 if (inp->inp_vflag & INP_IPV6PROTO) 8525 ip->ip_ttl = in6_selecthlim(inp, NULL); 8526 #endif /* INET6 */ 8527 /* 8528 * If we do path MTU discovery, then we set DF on every 8529 * packet. This might not be the best thing to do according 8530 * to RFC3390 Section 2. However the tcp hostcache migitates 8531 * the problem so it affects only the first tcp connection 8532 * with a host. 8533 * 8534 * NB: Don't set DF on small MTU/MSS to have a safe 8535 * fallback. 8536 */ 8537 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 8538 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 8539 if (tp->t_port == 0 || len < V_tcp_minmss) { 8540 ip->ip_off |= htons(IP_DF); 8541 } 8542 } else { 8543 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 8544 } 8545 8546 if (tp->t_state == TCPS_SYN_SENT) 8547 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 8548 8549 TCP_PROBE5(send, NULL, tp, ip, tp, th); 8550 8551 error = ip_output(m, tp->t_inpcb->inp_options, &inp->inp_route, 8552 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, 8553 inp); 8554 if (error == EMSGSIZE && inp->inp_route.ro_rt != NULL) 8555 mtu = inp->inp_route.ro_rt->rt_mtu; 8556 } 8557 #endif /* INET */ 8558 8559 out: 8560 if (lgb) { 8561 lgb->tlb_errno = error; 8562 lgb = NULL; 8563 } 8564 /* 8565 * In transmit state, time the transmission and arrange for the 8566 * retransmit. In persist state, just set snd_max. 8567 */ 8568 if (error == 0) { 8569 if (TCPS_HAVEESTABLISHED(tp->t_state) && 8570 (tp->t_flags & TF_SACK_PERMIT) && 8571 tp->rcv_numsacks > 0) 8572 tcp_clean_dsack_blocks(tp); 8573 if (len == 0) 8574 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 8575 else if (len == 1) { 8576 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 8577 } else if (len > 1) { 8578 int idx; 8579 8580 idx = (len / tp->t_maxseg) + 3; 8581 if (idx >= TCP_MSS_ACCT_ATIMER) 8582 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 8583 else 8584 counter_u64_add(rack_out_size[idx], 1); 8585 } 8586 } 8587 if (sub_from_prr && (error == 0)) { 8588 if (rack->r_ctl.rc_prr_sndcnt >= len) 8589 rack->r_ctl.rc_prr_sndcnt -= len; 8590 else 8591 rack->r_ctl.rc_prr_sndcnt = 0; 8592 } 8593 sub_from_prr = 0; 8594 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts, 8595 pass, rsm); 8596 if ((tp->t_flags & TF_FORCEDATA) == 0 || 8597 (rack->rc_in_persist == 0)) { 8598 #ifdef NETFLIX_STATS 8599 tcp_seq startseq = tp->snd_nxt; 8600 #endif 8601 /* 8602 * Advance snd_nxt over sequence space of this segment. 8603 */ 8604 if (error) 8605 /* We don't log or do anything with errors */ 8606 goto timer; 8607 8608 if (flags & (TH_SYN | TH_FIN)) { 8609 if (flags & TH_SYN) 8610 tp->snd_nxt++; 8611 if (flags & TH_FIN) { 8612 tp->snd_nxt++; 8613 tp->t_flags |= TF_SENTFIN; 8614 } 8615 } 8616 /* In the ENOBUFS case we do *not* update snd_max */ 8617 if (sack_rxmit) 8618 goto timer; 8619 8620 tp->snd_nxt += len; 8621 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 8622 if (tp->snd_una == tp->snd_max) { 8623 /* 8624 * Update the time we just added data since 8625 * none was outstanding. 8626 */ 8627 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 8628 tp->t_acktime = ticks; 8629 } 8630 tp->snd_max = tp->snd_nxt; 8631 #ifdef NETFLIX_STATS 8632 if (!(tp->t_flags & TF_GPUTINPROG) && len) { 8633 tp->t_flags |= TF_GPUTINPROG; 8634 tp->gput_seq = startseq; 8635 tp->gput_ack = startseq + 8636 ulmin(sbavail(sb) - sb_offset, sendwin); 8637 tp->gput_ts = tcp_ts_getticks(); 8638 } 8639 #endif 8640 } 8641 /* 8642 * Set retransmit timer if not currently set, and not doing 8643 * a pure ack or a keep-alive probe. Initial value for 8644 * retransmit timer is smoothed round-trip time + 2 * 8645 * round-trip time variance. Initialize shift counter which 8646 * is used for backoff of retransmit time. 8647 */ 8648 timer: 8649 if ((tp->snd_wnd == 0) && 8650 TCPS_HAVEESTABLISHED(tp->t_state)) { 8651 /* 8652 * If the persists timer was set above (right before 8653 * the goto send), and still needs to be on. Lets 8654 * make sure all is canceled. If the persist timer 8655 * is not running, we want to get it up. 8656 */ 8657 if (rack->rc_in_persist == 0) { 8658 rack_enter_persist(tp, rack, cts); 8659 } 8660 } 8661 } else { 8662 /* 8663 * Persist case, update snd_max but since we are in persist 8664 * mode (no window) we do not update snd_nxt. 8665 */ 8666 int32_t xlen = len; 8667 8668 if (error) 8669 goto nomore; 8670 8671 if (flags & TH_SYN) 8672 ++xlen; 8673 if (flags & TH_FIN) { 8674 ++xlen; 8675 tp->t_flags |= TF_SENTFIN; 8676 } 8677 /* In the ENOBUFS case we do *not* update snd_max */ 8678 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) { 8679 if (tp->snd_una == tp->snd_max) { 8680 /* 8681 * Update the time we just added data since 8682 * none was outstanding. 8683 */ 8684 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 8685 tp->t_acktime = ticks; 8686 } 8687 tp->snd_max = tp->snd_nxt + len; 8688 } 8689 } 8690 nomore: 8691 if (error) { 8692 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 8693 /* 8694 * Failures do not advance the seq counter above. For the 8695 * case of ENOBUFS we will fall out and retry in 1ms with 8696 * the hpts. Everything else will just have to retransmit 8697 * with the timer. 8698 * 8699 * In any case, we do not want to loop around for another 8700 * send without a good reason. 8701 */ 8702 sendalot = 0; 8703 switch (error) { 8704 case EPERM: 8705 tp->t_flags &= ~TF_FORCEDATA; 8706 tp->t_softerror = error; 8707 return (error); 8708 case ENOBUFS: 8709 if (slot == 0) { 8710 /* 8711 * Pace us right away to retry in a some 8712 * time 8713 */ 8714 slot = 1 + rack->rc_enobuf; 8715 if (rack->rc_enobuf < 255) 8716 rack->rc_enobuf++; 8717 if (slot > (rack->rc_rack_rtt / 2)) { 8718 slot = rack->rc_rack_rtt / 2; 8719 } 8720 if (slot < 10) 8721 slot = 10; 8722 } 8723 counter_u64_add(rack_saw_enobuf, 1); 8724 error = 0; 8725 goto enobufs; 8726 case EMSGSIZE: 8727 /* 8728 * For some reason the interface we used initially 8729 * to send segments changed to another or lowered 8730 * its MTU. If TSO was active we either got an 8731 * interface without TSO capabilits or TSO was 8732 * turned off. If we obtained mtu from ip_output() 8733 * then update it and try again. 8734 */ 8735 if (tso) 8736 tp->t_flags &= ~TF_TSO; 8737 if (mtu != 0) { 8738 tcp_mss_update(tp, -1, mtu, NULL, NULL); 8739 goto again; 8740 } 8741 slot = 10; 8742 rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1); 8743 tp->t_flags &= ~TF_FORCEDATA; 8744 return (error); 8745 case ENETUNREACH: 8746 counter_u64_add(rack_saw_enetunreach, 1); 8747 case EHOSTDOWN: 8748 case EHOSTUNREACH: 8749 case ENETDOWN: 8750 if (TCPS_HAVERCVDSYN(tp->t_state)) { 8751 tp->t_softerror = error; 8752 } 8753 /* FALLTHROUGH */ 8754 default: 8755 slot = 10; 8756 rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, 0, 1); 8757 tp->t_flags &= ~TF_FORCEDATA; 8758 return (error); 8759 } 8760 } else { 8761 rack->rc_enobuf = 0; 8762 } 8763 TCPSTAT_INC(tcps_sndtotal); 8764 8765 /* 8766 * Data sent (as far as we can tell). If this advertises a larger 8767 * window than any other segment, then remember the size of the 8768 * advertised window. Any pending ACK has now been sent. 8769 */ 8770 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 8771 tp->rcv_adv = tp->rcv_nxt + recwin; 8772 tp->last_ack_sent = tp->rcv_nxt; 8773 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 8774 enobufs: 8775 rack->r_tlp_running = 0; 8776 if ((flags & TH_RST) || (would_have_fin == 1)) { 8777 /* 8778 * We don't send again after a RST. We also do *not* send 8779 * again if we would have had a find, but now have 8780 * outstanding data. 8781 */ 8782 slot = 0; 8783 sendalot = 0; 8784 } 8785 if (slot) { 8786 /* set the rack tcb into the slot N */ 8787 counter_u64_add(rack_paced_segments, 1); 8788 } else if (sendalot) { 8789 if (len) 8790 counter_u64_add(rack_unpaced_segments, 1); 8791 sack_rxmit = 0; 8792 tp->t_flags &= ~TF_FORCEDATA; 8793 goto again; 8794 } else if (len) { 8795 counter_u64_add(rack_unpaced_segments, 1); 8796 } 8797 tp->t_flags &= ~TF_FORCEDATA; 8798 rack_start_hpts_timer(rack, tp, cts, __LINE__, slot, tot_len_this_send, 1); 8799 return (error); 8800 } 8801 8802 /* 8803 * rack_ctloutput() must drop the inpcb lock before performing copyin on 8804 * socket option arguments. When it re-acquires the lock after the copy, it 8805 * has to revalidate that the connection is still valid for the socket 8806 * option. 8807 */ 8808 static int 8809 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 8810 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 8811 { 8812 int32_t error = 0, optval; 8813 8814 switch (sopt->sopt_name) { 8815 case TCP_RACK_PROP_RATE: 8816 case TCP_RACK_PROP: 8817 case TCP_RACK_TLP_REDUCE: 8818 case TCP_RACK_EARLY_RECOV: 8819 case TCP_RACK_PACE_ALWAYS: 8820 case TCP_DELACK: 8821 case TCP_RACK_PACE_REDUCE: 8822 case TCP_RACK_PACE_MAX_SEG: 8823 case TCP_RACK_PRR_SENDALOT: 8824 case TCP_RACK_MIN_TO: 8825 case TCP_RACK_EARLY_SEG: 8826 case TCP_RACK_REORD_THRESH: 8827 case TCP_RACK_REORD_FADE: 8828 case TCP_RACK_TLP_THRESH: 8829 case TCP_RACK_PKT_DELAY: 8830 case TCP_RACK_TLP_USE: 8831 case TCP_RACK_TLP_INC_VAR: 8832 case TCP_RACK_IDLE_REDUCE_HIGH: 8833 case TCP_RACK_MIN_PACE: 8834 case TCP_RACK_MIN_PACE_SEG: 8835 case TCP_BBR_RACK_RTT_USE: 8836 case TCP_DATA_AFTER_CLOSE: 8837 break; 8838 default: 8839 return (tcp_default_ctloutput(so, sopt, inp, tp)); 8840 break; 8841 } 8842 INP_WUNLOCK(inp); 8843 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 8844 if (error) 8845 return (error); 8846 INP_WLOCK(inp); 8847 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 8848 INP_WUNLOCK(inp); 8849 return (ECONNRESET); 8850 } 8851 tp = intotcpcb(inp); 8852 rack = (struct tcp_rack *)tp->t_fb_ptr; 8853 switch (sopt->sopt_name) { 8854 case TCP_RACK_PROP_RATE: 8855 if ((optval <= 0) || (optval >= 100)) { 8856 error = EINVAL; 8857 break; 8858 } 8859 RACK_OPTS_INC(tcp_rack_prop_rate); 8860 rack->r_ctl.rc_prop_rate = optval; 8861 break; 8862 case TCP_RACK_TLP_USE: 8863 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 8864 error = EINVAL; 8865 break; 8866 } 8867 RACK_OPTS_INC(tcp_tlp_use); 8868 rack->rack_tlp_threshold_use = optval; 8869 break; 8870 case TCP_RACK_PROP: 8871 /* RACK proportional rate reduction (bool) */ 8872 RACK_OPTS_INC(tcp_rack_prop); 8873 rack->r_ctl.rc_prop_reduce = optval; 8874 break; 8875 case TCP_RACK_TLP_REDUCE: 8876 /* RACK TLP cwnd reduction (bool) */ 8877 RACK_OPTS_INC(tcp_rack_tlp_reduce); 8878 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 8879 break; 8880 case TCP_RACK_EARLY_RECOV: 8881 /* Should recovery happen early (bool) */ 8882 RACK_OPTS_INC(tcp_rack_early_recov); 8883 rack->r_ctl.rc_early_recovery = optval; 8884 break; 8885 case TCP_RACK_PACE_ALWAYS: 8886 /* Use the always pace method (bool) */ 8887 RACK_OPTS_INC(tcp_rack_pace_always); 8888 if (optval > 0) 8889 rack->rc_always_pace = 1; 8890 else 8891 rack->rc_always_pace = 0; 8892 break; 8893 case TCP_RACK_PACE_REDUCE: 8894 /* RACK Hptsi reduction factor (divisor) */ 8895 RACK_OPTS_INC(tcp_rack_pace_reduce); 8896 if (optval) 8897 /* Must be non-zero */ 8898 rack->rc_pace_reduce = optval; 8899 else 8900 error = EINVAL; 8901 break; 8902 case TCP_RACK_PACE_MAX_SEG: 8903 /* Max segments in a pace */ 8904 RACK_OPTS_INC(tcp_rack_max_seg); 8905 rack->rc_pace_max_segs = optval; 8906 break; 8907 case TCP_RACK_PRR_SENDALOT: 8908 /* Allow PRR to send more than one seg */ 8909 RACK_OPTS_INC(tcp_rack_prr_sendalot); 8910 rack->r_ctl.rc_prr_sendalot = optval; 8911 break; 8912 case TCP_RACK_MIN_TO: 8913 /* Minimum time between rack t-o's in ms */ 8914 RACK_OPTS_INC(tcp_rack_min_to); 8915 rack->r_ctl.rc_min_to = optval; 8916 break; 8917 case TCP_RACK_EARLY_SEG: 8918 /* If early recovery max segments */ 8919 RACK_OPTS_INC(tcp_rack_early_seg); 8920 rack->r_ctl.rc_early_recovery_segs = optval; 8921 break; 8922 case TCP_RACK_REORD_THRESH: 8923 /* RACK reorder threshold (shift amount) */ 8924 RACK_OPTS_INC(tcp_rack_reord_thresh); 8925 if ((optval > 0) && (optval < 31)) 8926 rack->r_ctl.rc_reorder_shift = optval; 8927 else 8928 error = EINVAL; 8929 break; 8930 case TCP_RACK_REORD_FADE: 8931 /* Does reordering fade after ms time */ 8932 RACK_OPTS_INC(tcp_rack_reord_fade); 8933 rack->r_ctl.rc_reorder_fade = optval; 8934 break; 8935 case TCP_RACK_TLP_THRESH: 8936 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 8937 RACK_OPTS_INC(tcp_rack_tlp_thresh); 8938 if (optval) 8939 rack->r_ctl.rc_tlp_threshold = optval; 8940 else 8941 error = EINVAL; 8942 break; 8943 case TCP_RACK_PKT_DELAY: 8944 /* RACK added ms i.e. rack-rtt + reord + N */ 8945 RACK_OPTS_INC(tcp_rack_pkt_delay); 8946 rack->r_ctl.rc_pkt_delay = optval; 8947 break; 8948 case TCP_RACK_TLP_INC_VAR: 8949 /* Does TLP include rtt variance in t-o */ 8950 RACK_OPTS_INC(tcp_rack_tlp_inc_var); 8951 rack->r_ctl.rc_prr_inc_var = optval; 8952 break; 8953 case TCP_RACK_IDLE_REDUCE_HIGH: 8954 RACK_OPTS_INC(tcp_rack_idle_reduce_high); 8955 if (optval) 8956 rack->r_idle_reduce_largest = 1; 8957 else 8958 rack->r_idle_reduce_largest = 0; 8959 break; 8960 case TCP_DELACK: 8961 if (optval == 0) 8962 tp->t_delayed_ack = 0; 8963 else 8964 tp->t_delayed_ack = 1; 8965 if (tp->t_flags & TF_DELACK) { 8966 tp->t_flags &= ~TF_DELACK; 8967 tp->t_flags |= TF_ACKNOW; 8968 rack_output(tp); 8969 } 8970 break; 8971 case TCP_RACK_MIN_PACE: 8972 RACK_OPTS_INC(tcp_rack_min_pace); 8973 if (optval > 3) 8974 rack->r_enforce_min_pace = 3; 8975 else 8976 rack->r_enforce_min_pace = optval; 8977 break; 8978 case TCP_RACK_MIN_PACE_SEG: 8979 RACK_OPTS_INC(tcp_rack_min_pace_seg); 8980 if (optval >= 16) 8981 rack->r_min_pace_seg_thresh = 15; 8982 else 8983 rack->r_min_pace_seg_thresh = optval; 8984 break; 8985 case TCP_BBR_RACK_RTT_USE: 8986 if ((optval != USE_RTT_HIGH) && 8987 (optval != USE_RTT_LOW) && 8988 (optval != USE_RTT_AVG)) 8989 error = EINVAL; 8990 else 8991 rack->r_ctl.rc_rate_sample_method = optval; 8992 break; 8993 case TCP_DATA_AFTER_CLOSE: 8994 if (optval) 8995 rack->rc_allow_data_af_clo = 1; 8996 else 8997 rack->rc_allow_data_af_clo = 0; 8998 break; 8999 default: 9000 return (tcp_default_ctloutput(so, sopt, inp, tp)); 9001 break; 9002 } 9003 /* tcp_log_socket_option(tp, sopt->sopt_name, optval, error);*/ 9004 INP_WUNLOCK(inp); 9005 return (error); 9006 } 9007 9008 static int 9009 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 9010 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 9011 { 9012 int32_t error, optval; 9013 9014 /* 9015 * Because all our options are either boolean or an int, we can just 9016 * pull everything into optval and then unlock and copy. If we ever 9017 * add a option that is not a int, then this will have quite an 9018 * impact to this routine. 9019 */ 9020 switch (sopt->sopt_name) { 9021 case TCP_RACK_PROP_RATE: 9022 optval = rack->r_ctl.rc_prop_rate; 9023 break; 9024 case TCP_RACK_PROP: 9025 /* RACK proportional rate reduction (bool) */ 9026 optval = rack->r_ctl.rc_prop_reduce; 9027 break; 9028 case TCP_RACK_TLP_REDUCE: 9029 /* RACK TLP cwnd reduction (bool) */ 9030 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 9031 break; 9032 case TCP_RACK_EARLY_RECOV: 9033 /* Should recovery happen early (bool) */ 9034 optval = rack->r_ctl.rc_early_recovery; 9035 break; 9036 case TCP_RACK_PACE_REDUCE: 9037 /* RACK Hptsi reduction factor (divisor) */ 9038 optval = rack->rc_pace_reduce; 9039 break; 9040 case TCP_RACK_PACE_MAX_SEG: 9041 /* Max segments in a pace */ 9042 optval = rack->rc_pace_max_segs; 9043 break; 9044 case TCP_RACK_PACE_ALWAYS: 9045 /* Use the always pace method */ 9046 optval = rack->rc_always_pace; 9047 break; 9048 case TCP_RACK_PRR_SENDALOT: 9049 /* Allow PRR to send more than one seg */ 9050 optval = rack->r_ctl.rc_prr_sendalot; 9051 break; 9052 case TCP_RACK_MIN_TO: 9053 /* Minimum time between rack t-o's in ms */ 9054 optval = rack->r_ctl.rc_min_to; 9055 break; 9056 case TCP_RACK_EARLY_SEG: 9057 /* If early recovery max segments */ 9058 optval = rack->r_ctl.rc_early_recovery_segs; 9059 break; 9060 case TCP_RACK_REORD_THRESH: 9061 /* RACK reorder threshold (shift amount) */ 9062 optval = rack->r_ctl.rc_reorder_shift; 9063 break; 9064 case TCP_RACK_REORD_FADE: 9065 /* Does reordering fade after ms time */ 9066 optval = rack->r_ctl.rc_reorder_fade; 9067 break; 9068 case TCP_RACK_TLP_THRESH: 9069 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 9070 optval = rack->r_ctl.rc_tlp_threshold; 9071 break; 9072 case TCP_RACK_PKT_DELAY: 9073 /* RACK added ms i.e. rack-rtt + reord + N */ 9074 optval = rack->r_ctl.rc_pkt_delay; 9075 break; 9076 case TCP_RACK_TLP_USE: 9077 optval = rack->rack_tlp_threshold_use; 9078 break; 9079 case TCP_RACK_TLP_INC_VAR: 9080 /* Does TLP include rtt variance in t-o */ 9081 optval = rack->r_ctl.rc_prr_inc_var; 9082 break; 9083 case TCP_RACK_IDLE_REDUCE_HIGH: 9084 optval = rack->r_idle_reduce_largest; 9085 break; 9086 case TCP_RACK_MIN_PACE: 9087 optval = rack->r_enforce_min_pace; 9088 break; 9089 case TCP_RACK_MIN_PACE_SEG: 9090 optval = rack->r_min_pace_seg_thresh; 9091 break; 9092 case TCP_BBR_RACK_RTT_USE: 9093 optval = rack->r_ctl.rc_rate_sample_method; 9094 break; 9095 case TCP_DELACK: 9096 optval = tp->t_delayed_ack; 9097 break; 9098 case TCP_DATA_AFTER_CLOSE: 9099 optval = rack->rc_allow_data_af_clo; 9100 break; 9101 default: 9102 return (tcp_default_ctloutput(so, sopt, inp, tp)); 9103 break; 9104 } 9105 INP_WUNLOCK(inp); 9106 error = sooptcopyout(sopt, &optval, sizeof optval); 9107 return (error); 9108 } 9109 9110 static int 9111 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp) 9112 { 9113 int32_t error = EINVAL; 9114 struct tcp_rack *rack; 9115 9116 rack = (struct tcp_rack *)tp->t_fb_ptr; 9117 if (rack == NULL) { 9118 /* Huh? */ 9119 goto out; 9120 } 9121 if (sopt->sopt_dir == SOPT_SET) { 9122 return (rack_set_sockopt(so, sopt, inp, tp, rack)); 9123 } else if (sopt->sopt_dir == SOPT_GET) { 9124 return (rack_get_sockopt(so, sopt, inp, tp, rack)); 9125 } 9126 out: 9127 INP_WUNLOCK(inp); 9128 return (error); 9129 } 9130 9131 9132 struct tcp_function_block __tcp_rack = { 9133 .tfb_tcp_block_name = __XSTRING(STACKNAME), 9134 .tfb_tcp_output = rack_output, 9135 .tfb_tcp_do_segment = rack_do_segment, 9136 .tfb_tcp_ctloutput = rack_ctloutput, 9137 .tfb_tcp_fb_init = rack_init, 9138 .tfb_tcp_fb_fini = rack_fini, 9139 .tfb_tcp_timer_stop_all = rack_stopall, 9140 .tfb_tcp_timer_activate = rack_timer_activate, 9141 .tfb_tcp_timer_active = rack_timer_active, 9142 .tfb_tcp_timer_stop = rack_timer_stop, 9143 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 9144 .tfb_tcp_handoff_ok = rack_handoff_ok 9145 }; 9146 9147 static const char *rack_stack_names[] = { 9148 __XSTRING(STACKNAME), 9149 #ifdef STACKALIAS 9150 __XSTRING(STACKALIAS), 9151 #endif 9152 }; 9153 9154 static int 9155 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 9156 { 9157 memset(mem, 0, size); 9158 return (0); 9159 } 9160 9161 static void 9162 rack_dtor(void *mem, int32_t size, void *arg) 9163 { 9164 9165 } 9166 9167 static bool rack_mod_inited = false; 9168 9169 static int 9170 tcp_addrack(module_t mod, int32_t type, void *data) 9171 { 9172 int32_t err = 0; 9173 int num_stacks; 9174 9175 switch (type) { 9176 case MOD_LOAD: 9177 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 9178 sizeof(struct rack_sendmap), 9179 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 9180 9181 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 9182 sizeof(struct tcp_rack), 9183 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 9184 9185 sysctl_ctx_init(&rack_sysctl_ctx); 9186 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 9187 SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 9188 OID_AUTO, 9189 __XSTRING(STACKNAME), 9190 CTLFLAG_RW, 0, 9191 ""); 9192 if (rack_sysctl_root == NULL) { 9193 printf("Failed to add sysctl node\n"); 9194 err = EFAULT; 9195 goto free_uma; 9196 } 9197 rack_init_sysctls(); 9198 num_stacks = nitems(rack_stack_names); 9199 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 9200 rack_stack_names, &num_stacks); 9201 if (err) { 9202 printf("Failed to register %s stack name for " 9203 "%s module\n", rack_stack_names[num_stacks], 9204 __XSTRING(MODNAME)); 9205 sysctl_ctx_free(&rack_sysctl_ctx); 9206 free_uma: 9207 uma_zdestroy(rack_zone); 9208 uma_zdestroy(rack_pcb_zone); 9209 rack_counter_destroy(); 9210 printf("Failed to register rack module -- err:%d\n", err); 9211 return (err); 9212 } 9213 rack_mod_inited = true; 9214 break; 9215 case MOD_QUIESCE: 9216 err = deregister_tcp_functions(&__tcp_rack, true, false); 9217 break; 9218 case MOD_UNLOAD: 9219 err = deregister_tcp_functions(&__tcp_rack, false, true); 9220 if (err == EBUSY) 9221 break; 9222 if (rack_mod_inited) { 9223 uma_zdestroy(rack_zone); 9224 uma_zdestroy(rack_pcb_zone); 9225 sysctl_ctx_free(&rack_sysctl_ctx); 9226 rack_counter_destroy(); 9227 rack_mod_inited = false; 9228 } 9229 err = 0; 9230 break; 9231 default: 9232 return (EOPNOTSUPP); 9233 } 9234 return (err); 9235 } 9236 9237 static moduledata_t tcp_rack = { 9238 .name = __XSTRING(MODNAME), 9239 .evhand = tcp_addrack, 9240 .priv = 0 9241 }; 9242 9243 MODULE_VERSION(MODNAME, 1); 9244 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 9245 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 9246