/*-
 * Copyright (c) 2016-2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef NETFLIX_STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#endif
#include <sys/refcount.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving, which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named), which
 *   stops us using the number of dup acks and instead
 *   uses time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports
 * SACK initially and then assure that the RACK state matches the
 * connection state before calling the state's do_segment function.
 * Each state is simplified due to the fact that the original
 * do_segment has been decomposed and we *know* what state we are
 * in (no switches on the state) and all tests for SACK are gone.
 * This greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
static int32_t rack_tlp_thresh = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000;	/* 0 - never fade, def 60,000
						 * - 60 seconds */
/* Attack detection thresholds */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;

static int32_t rack_pkt_delay = 1;
static int32_t rack_min_pace_time = 0;
static int32_t rack_early_recovery = 1;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1;	/* Number of ms minimum timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t use_rack_cheat = 1;
static int32_t rack_persist_min = 250;	/* 250ms */
static int32_t rack_persist_max = 1000;	/* 1 Second */
static int32_t rack_sack_not_required = 0;	/* set to one to allow non-sack to use rack */
static int32_t rack_hw_tls_max_seg = 0;	/* 0 means use hw-tls single segment */

/* Sack attack detection thresholds and such */
static int32_t tcp_force_detection = 0;

#ifdef NETFLIX_EXP_DETECTION
static int32_t tcp_sack_to_ack_thresh = 700;	/* 70 % */
static int32_t tcp_sack_to_move_thresh = 600;	/* 60 % */
static int32_t tcp_restoral_thresh = 650;	/* 65 % (sack:2:ack -5%) */
static int32_t tcp_attack_on_turns_on_logging = 0;
static int32_t tcp_map_minimum = 500;
#endif
static int32_t tcp_sad_decay_val = 800;
static int32_t tcp_sad_pacing_interval = 2000;
static int32_t tcp_sad_low_pps = 100;


/*
 * Currently regular tcp has a rto_min of 30ms;
 * the backoff goes 12 times, so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
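 *
 * As a worked example of that figure (assuming the retransmit timer
 * simply doubles from the 30ms base on each of the 12 backoffs,
 * which is what the number above reflects):
 *   30 + 60 + 120 + ... + 30 * 2^11
 *     = 30 * (2^12 - 1) = 122,850 ms, i.e. roughly 122.85 seconds.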
210 */ 211 static int32_t rack_tlp_min = 10; 212 static int32_t rack_rto_min = 30; /* 30ms same as main freebsd */ 213 static int32_t rack_rto_max = 4000; /* 4 seconds */ 214 static const int32_t rack_free_cache = 2; 215 static int32_t rack_hptsi_segments = 40; 216 static int32_t rack_rate_sample_method = USE_RTT_LOW; 217 static int32_t rack_pace_every_seg = 0; 218 static int32_t rack_delayed_ack_time = 200; /* 200ms */ 219 static int32_t rack_slot_reduction = 4; 220 static int32_t rack_lower_cwnd_at_tlp = 0; 221 static int32_t rack_use_proportional_reduce = 0; 222 static int32_t rack_proportional_rate = 10; 223 static int32_t rack_tlp_max_resend = 2; 224 static int32_t rack_limited_retran = 0; 225 static int32_t rack_always_send_oldest = 0; 226 static int32_t rack_use_sack_filter = 1; 227 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 228 static int32_t rack_per_of_gp = 50; 229 static int32_t rack_tcp_map_entries_limit = 1500; 230 static int32_t rack_tcp_map_split_limit = 256; 231 232 233 /* Rack specific counters */ 234 counter_u64_t rack_badfr; 235 counter_u64_t rack_badfr_bytes; 236 counter_u64_t rack_rtm_prr_retran; 237 counter_u64_t rack_rtm_prr_newdata; 238 counter_u64_t rack_timestamp_mismatch; 239 counter_u64_t rack_reorder_seen; 240 counter_u64_t rack_paced_segments; 241 counter_u64_t rack_unpaced_segments; 242 counter_u64_t rack_calc_zero; 243 counter_u64_t rack_calc_nonzero; 244 counter_u64_t rack_saw_enobuf; 245 counter_u64_t rack_saw_enetunreach; 246 counter_u64_t rack_per_timer_hole; 247 248 /* Tail loss probe counters */ 249 counter_u64_t rack_tlp_tot; 250 counter_u64_t rack_tlp_newdata; 251 counter_u64_t rack_tlp_retran; 252 counter_u64_t rack_tlp_retran_bytes; 253 counter_u64_t rack_tlp_retran_fail; 254 counter_u64_t rack_to_tot; 255 counter_u64_t rack_to_arm_rack; 256 counter_u64_t rack_to_arm_tlp; 257 counter_u64_t rack_to_alloc; 258 counter_u64_t rack_to_alloc_hard; 259 counter_u64_t rack_to_alloc_emerg; 260 counter_u64_t rack_to_alloc_limited; 261 counter_u64_t rack_alloc_limited_conns; 262 counter_u64_t rack_split_limited; 263 264 counter_u64_t rack_sack_proc_all; 265 counter_u64_t rack_sack_proc_short; 266 counter_u64_t rack_sack_proc_restart; 267 counter_u64_t rack_sack_attacks_detected; 268 counter_u64_t rack_sack_attacks_reversed; 269 counter_u64_t rack_sack_used_next_merge; 270 counter_u64_t rack_sack_splits; 271 counter_u64_t rack_sack_used_prev_merge; 272 counter_u64_t rack_sack_skipped_acked; 273 counter_u64_t rack_ack_total; 274 counter_u64_t rack_express_sack; 275 counter_u64_t rack_sack_total; 276 counter_u64_t rack_move_none; 277 counter_u64_t rack_move_some; 278 279 counter_u64_t rack_used_tlpmethod; 280 counter_u64_t rack_used_tlpmethod2; 281 counter_u64_t rack_enter_tlp_calc; 282 counter_u64_t rack_input_idle_reduces; 283 counter_u64_t rack_collapsed_win; 284 counter_u64_t rack_tlp_does_nada; 285 286 /* Counters for HW TLS */ 287 counter_u64_t rack_tls_rwnd; 288 counter_u64_t rack_tls_cwnd; 289 counter_u64_t rack_tls_app; 290 counter_u64_t rack_tls_other; 291 counter_u64_t rack_tls_filled; 292 counter_u64_t rack_tls_rxt; 293 counter_u64_t rack_tls_tlp; 294 295 /* Temp CPU counters */ 296 counter_u64_t rack_find_high; 297 298 counter_u64_t rack_progress_drops; 299 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 300 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 301 302 static void 303 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 304 305 static int 306 rack_process_ack(struct mbuf *m, 
struct tcphdr *th, 307 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 308 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 309 static int 310 rack_process_data(struct mbuf *m, struct tcphdr *th, 311 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 312 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 313 static void 314 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 315 struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery); 316 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 317 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 318 uint8_t limit_type); 319 static struct rack_sendmap * 320 rack_check_recovery_mode(struct tcpcb *tp, 321 uint32_t tsused); 322 static void 323 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, 324 uint32_t type); 325 static void rack_counter_destroy(void); 326 static int 327 rack_ctloutput(struct socket *so, struct sockopt *sopt, 328 struct inpcb *inp, struct tcpcb *tp); 329 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 330 static void 331 rack_do_segment(struct mbuf *m, struct tcphdr *th, 332 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 333 uint8_t iptos); 334 static void rack_dtor(void *mem, int32_t size, void *arg); 335 static void 336 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm, 337 uint32_t t, uint32_t cts); 338 static struct rack_sendmap * 339 rack_find_high_nonack(struct tcp_rack *rack, 340 struct rack_sendmap *rsm); 341 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 342 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 343 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 344 static int 345 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 346 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 347 static int32_t rack_handoff_ok(struct tcpcb *tp); 348 static int32_t rack_init(struct tcpcb *tp); 349 static void rack_init_sysctls(void); 350 static void 351 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 352 struct tcphdr *th); 353 static void 354 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 355 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts, 356 uint8_t pass, struct rack_sendmap *hintrsm); 357 static void 358 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 359 struct rack_sendmap *rsm); 360 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, int num); 361 static int32_t rack_output(struct tcpcb *tp); 362 363 static uint32_t 364 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 365 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 366 uint32_t cts, int *moved_two); 367 static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th); 368 static void rack_remxt_tmr(struct tcpcb *tp); 369 static int 370 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 371 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 372 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 373 static int32_t rack_stopall(struct tcpcb *tp); 374 static void 375 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 376 uint32_t delta); 377 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 378 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 379 static void rack_timer_stop(struct tcpcb *tp, uint32_t 
timer_type); 380 static uint32_t 381 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 382 struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp); 383 static void 384 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 385 struct rack_sendmap *rsm, uint32_t ts); 386 static int 387 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 388 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type); 389 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 390 static int 391 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 392 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 393 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 394 static int 395 rack_do_closing(struct mbuf *m, struct tcphdr *th, 396 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 397 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 398 static int 399 rack_do_established(struct mbuf *m, struct tcphdr *th, 400 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 401 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 402 static int 403 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 404 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 405 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 406 static int 407 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 408 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 409 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 410 static int 411 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 412 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 413 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 414 static int 415 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 416 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 417 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 418 static int 419 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 420 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 421 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 422 static int 423 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 424 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 425 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 426 struct rack_sendmap * 427 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 428 uint32_t tsused); 429 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt); 430 static void 431 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th); 432 433 int32_t rack_clear_counter=0; 434 435 436 static int 437 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 438 { 439 uint32_t stat; 440 int32_t error; 441 442 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 443 if (error || req->newptr == NULL) 444 return error; 445 446 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 447 if (error) 448 return (error); 449 if (stat == 1) { 450 #ifdef INVARIANTS 451 printf("Clearing RACK counters\n"); 452 #endif 453 counter_u64_zero(rack_badfr); 454 counter_u64_zero(rack_badfr_bytes); 455 counter_u64_zero(rack_rtm_prr_retran); 456 counter_u64_zero(rack_rtm_prr_newdata); 457 counter_u64_zero(rack_timestamp_mismatch); 
458 counter_u64_zero(rack_reorder_seen); 459 counter_u64_zero(rack_tlp_tot); 460 counter_u64_zero(rack_tlp_newdata); 461 counter_u64_zero(rack_tlp_retran); 462 counter_u64_zero(rack_tlp_retran_bytes); 463 counter_u64_zero(rack_tlp_retran_fail); 464 counter_u64_zero(rack_to_tot); 465 counter_u64_zero(rack_to_arm_rack); 466 counter_u64_zero(rack_to_arm_tlp); 467 counter_u64_zero(rack_paced_segments); 468 counter_u64_zero(rack_calc_zero); 469 counter_u64_zero(rack_calc_nonzero); 470 counter_u64_zero(rack_unpaced_segments); 471 counter_u64_zero(rack_saw_enobuf); 472 counter_u64_zero(rack_saw_enetunreach); 473 counter_u64_zero(rack_per_timer_hole); 474 counter_u64_zero(rack_to_alloc_hard); 475 counter_u64_zero(rack_to_alloc_emerg); 476 counter_u64_zero(rack_sack_proc_all); 477 counter_u64_zero(rack_sack_proc_short); 478 counter_u64_zero(rack_sack_proc_restart); 479 counter_u64_zero(rack_to_alloc); 480 counter_u64_zero(rack_to_alloc_limited); 481 counter_u64_zero(rack_alloc_limited_conns); 482 counter_u64_zero(rack_split_limited); 483 counter_u64_zero(rack_find_high); 484 counter_u64_zero(rack_tls_rwnd); 485 counter_u64_zero(rack_tls_cwnd); 486 counter_u64_zero(rack_tls_app); 487 counter_u64_zero(rack_tls_other); 488 counter_u64_zero(rack_tls_filled); 489 counter_u64_zero(rack_tls_rxt); 490 counter_u64_zero(rack_tls_tlp); 491 counter_u64_zero(rack_sack_attacks_detected); 492 counter_u64_zero(rack_sack_attacks_reversed); 493 counter_u64_zero(rack_sack_used_next_merge); 494 counter_u64_zero(rack_sack_used_prev_merge); 495 counter_u64_zero(rack_sack_splits); 496 counter_u64_zero(rack_sack_skipped_acked); 497 counter_u64_zero(rack_ack_total); 498 counter_u64_zero(rack_express_sack); 499 counter_u64_zero(rack_sack_total); 500 counter_u64_zero(rack_move_none); 501 counter_u64_zero(rack_move_some); 502 counter_u64_zero(rack_used_tlpmethod); 503 counter_u64_zero(rack_used_tlpmethod2); 504 counter_u64_zero(rack_enter_tlp_calc); 505 counter_u64_zero(rack_progress_drops); 506 counter_u64_zero(rack_tlp_does_nada); 507 counter_u64_zero(rack_collapsed_win); 508 509 } 510 rack_clear_counter = 0; 511 return (0); 512 } 513 514 515 516 static void 517 rack_init_sysctls(void) 518 { 519 struct sysctl_oid *rack_counters; 520 struct sysctl_oid *rack_attack; 521 522 SYSCTL_ADD_S32(&rack_sysctl_ctx, 523 SYSCTL_CHILDREN(rack_sysctl_root), 524 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 525 &rack_rate_sample_method , USE_RTT_LOW, 526 "What method should we use for rate sampling 0=high, 1=low "); 527 SYSCTL_ADD_S32(&rack_sysctl_ctx, 528 SYSCTL_CHILDREN(rack_sysctl_root), 529 OID_AUTO, "hw_tlsmax", CTLFLAG_RW, 530 &rack_hw_tls_max_seg , 0, 531 "Do we have a multplier of TLS records we can send as a max (0=1 TLS record)? 
"); 532 SYSCTL_ADD_S32(&rack_sysctl_ctx, 533 SYSCTL_CHILDREN(rack_sysctl_root), 534 OID_AUTO, "data_after_close", CTLFLAG_RW, 535 &rack_ignore_data_after_close, 0, 536 "Do we hold off sending a RST until all pending data is ack'd"); 537 SYSCTL_ADD_S32(&rack_sysctl_ctx, 538 SYSCTL_CHILDREN(rack_sysctl_root), 539 OID_AUTO, "cheat_rxt", CTLFLAG_RW, 540 &use_rack_cheat, 1, 541 "Do we use the rxt cheat for rack?"); 542 543 SYSCTL_ADD_U32(&rack_sysctl_ctx, 544 SYSCTL_CHILDREN(rack_sysctl_root), 545 OID_AUTO, "persmin", CTLFLAG_RW, 546 &rack_persist_min, 250, 547 "What is the minimum time in milliseconds between persists"); 548 SYSCTL_ADD_U32(&rack_sysctl_ctx, 549 SYSCTL_CHILDREN(rack_sysctl_root), 550 OID_AUTO, "persmax", CTLFLAG_RW, 551 &rack_persist_max, 1000, 552 "What is the largest delay in milliseconds between persists"); 553 SYSCTL_ADD_S32(&rack_sysctl_ctx, 554 SYSCTL_CHILDREN(rack_sysctl_root), 555 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 556 &rack_sack_not_required, 0, 557 "Do we allow rack to run on connections not supporting SACK?"); 558 SYSCTL_ADD_S32(&rack_sysctl_ctx, 559 SYSCTL_CHILDREN(rack_sysctl_root), 560 OID_AUTO, "tlpmethod", CTLFLAG_RW, 561 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 562 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 563 SYSCTL_ADD_S32(&rack_sysctl_ctx, 564 SYSCTL_CHILDREN(rack_sysctl_root), 565 OID_AUTO, "gp_percentage", CTLFLAG_RW, 566 &rack_per_of_gp, 50, 567 "Do we pace to percentage of goodput (0=old method)?"); 568 SYSCTL_ADD_S32(&rack_sysctl_ctx, 569 SYSCTL_CHILDREN(rack_sysctl_root), 570 OID_AUTO, "min_pace_time", CTLFLAG_RW, 571 &rack_min_pace_time, 0, 572 "Should we enforce a minimum pace time of 1ms"); 573 SYSCTL_ADD_S32(&rack_sysctl_ctx, 574 SYSCTL_CHILDREN(rack_sysctl_root), 575 OID_AUTO, "bb_verbose", CTLFLAG_RW, 576 &rack_verbose_logging, 0, 577 "Should RACK black box logging be verbose"); 578 SYSCTL_ADD_S32(&rack_sysctl_ctx, 579 SYSCTL_CHILDREN(rack_sysctl_root), 580 OID_AUTO, "sackfiltering", CTLFLAG_RW, 581 &rack_use_sack_filter, 1, 582 "Do we use sack filtering?"); 583 SYSCTL_ADD_S32(&rack_sysctl_ctx, 584 SYSCTL_CHILDREN(rack_sysctl_root), 585 OID_AUTO, "delayed_ack", CTLFLAG_RW, 586 &rack_delayed_ack_time, 200, 587 "Delayed ack time (200ms)"); 588 SYSCTL_ADD_S32(&rack_sysctl_ctx, 589 SYSCTL_CHILDREN(rack_sysctl_root), 590 OID_AUTO, "tlpminto", CTLFLAG_RW, 591 &rack_tlp_min, 10, 592 "TLP minimum timeout per the specification (10ms)"); 593 SYSCTL_ADD_S32(&rack_sysctl_ctx, 594 SYSCTL_CHILDREN(rack_sysctl_root), 595 OID_AUTO, "send_oldest", CTLFLAG_RW, 596 &rack_always_send_oldest, 1, 597 "Should we always send the oldest TLP and RACK-TLP"); 598 SYSCTL_ADD_S32(&rack_sysctl_ctx, 599 SYSCTL_CHILDREN(rack_sysctl_root), 600 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 601 &rack_limited_retran, 0, 602 "How many times can a rack timeout drive out sends"); 603 SYSCTL_ADD_S32(&rack_sysctl_ctx, 604 SYSCTL_CHILDREN(rack_sysctl_root), 605 OID_AUTO, "minrto", CTLFLAG_RW, 606 &rack_rto_min, 0, 607 "Minimum RTO in ms -- set with caution below 1000 due to TLP"); 608 SYSCTL_ADD_S32(&rack_sysctl_ctx, 609 SYSCTL_CHILDREN(rack_sysctl_root), 610 OID_AUTO, "maxrto", CTLFLAG_RW, 611 &rack_rto_max, 0, 612 "Maxiumum RTO in ms -- should be at least as large as min_rto"); 613 SYSCTL_ADD_S32(&rack_sysctl_ctx, 614 SYSCTL_CHILDREN(rack_sysctl_root), 615 OID_AUTO, "tlp_retry", CTLFLAG_RW, 616 &rack_tlp_max_resend, 2, 617 "How many times does TLP retry a single segment or multiple with no ACK"); 618 SYSCTL_ADD_S32(&rack_sysctl_ctx, 619 
SYSCTL_CHILDREN(rack_sysctl_root), 620 OID_AUTO, "recovery_loss_prop", CTLFLAG_RW, 621 &rack_use_proportional_reduce, 0, 622 "Should we proportionaly reduce cwnd based on the number of losses "); 623 SYSCTL_ADD_S32(&rack_sysctl_ctx, 624 SYSCTL_CHILDREN(rack_sysctl_root), 625 OID_AUTO, "recovery_prop", CTLFLAG_RW, 626 &rack_proportional_rate, 10, 627 "What percent reduction per loss"); 628 SYSCTL_ADD_S32(&rack_sysctl_ctx, 629 SYSCTL_CHILDREN(rack_sysctl_root), 630 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 631 &rack_lower_cwnd_at_tlp, 0, 632 "When a TLP completes a retran should we enter recovery?"); 633 SYSCTL_ADD_S32(&rack_sysctl_ctx, 634 SYSCTL_CHILDREN(rack_sysctl_root), 635 OID_AUTO, "hptsi_reduces", CTLFLAG_RW, 636 &rack_slot_reduction, 4, 637 "When setting a slot should we reduce by divisor"); 638 SYSCTL_ADD_S32(&rack_sysctl_ctx, 639 SYSCTL_CHILDREN(rack_sysctl_root), 640 OID_AUTO, "hptsi_every_seg", CTLFLAG_RW, 641 &rack_pace_every_seg, 0, 642 "Should we use the original pacing mechanism that did not pace much?"); 643 SYSCTL_ADD_S32(&rack_sysctl_ctx, 644 SYSCTL_CHILDREN(rack_sysctl_root), 645 OID_AUTO, "hptsi_seg_max", CTLFLAG_RW, 646 &rack_hptsi_segments, 40, 647 "Should we pace out only a limited size of segments"); 648 SYSCTL_ADD_S32(&rack_sysctl_ctx, 649 SYSCTL_CHILDREN(rack_sysctl_root), 650 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 651 &rack_send_a_lot_in_prr, 1, 652 "Send a lot in prr"); 653 SYSCTL_ADD_S32(&rack_sysctl_ctx, 654 SYSCTL_CHILDREN(rack_sysctl_root), 655 OID_AUTO, "minto", CTLFLAG_RW, 656 &rack_min_to, 1, 657 "Minimum rack timeout in milliseconds"); 658 SYSCTL_ADD_S32(&rack_sysctl_ctx, 659 SYSCTL_CHILDREN(rack_sysctl_root), 660 OID_AUTO, "earlyrecovery", CTLFLAG_RW, 661 &rack_early_recovery, 1, 662 "Do we do early recovery with rack"); 663 SYSCTL_ADD_S32(&rack_sysctl_ctx, 664 SYSCTL_CHILDREN(rack_sysctl_root), 665 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 666 &rack_reorder_thresh, 2, 667 "What factor for rack will be added when seeing reordering (shift right)"); 668 SYSCTL_ADD_S32(&rack_sysctl_ctx, 669 SYSCTL_CHILDREN(rack_sysctl_root), 670 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 671 &rack_tlp_thresh, 1, 672 "what divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 673 SYSCTL_ADD_S32(&rack_sysctl_ctx, 674 SYSCTL_CHILDREN(rack_sysctl_root), 675 OID_AUTO, "reorder_fade", CTLFLAG_RW, 676 &rack_reorder_fade, 0, 677 "Does reorder detection fade, if so how many ms (0 means never)"); 678 SYSCTL_ADD_S32(&rack_sysctl_ctx, 679 SYSCTL_CHILDREN(rack_sysctl_root), 680 OID_AUTO, "pktdelay", CTLFLAG_RW, 681 &rack_pkt_delay, 1, 682 "Extra RACK time (in ms) besides reordering thresh"); 683 684 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 685 SYSCTL_CHILDREN(rack_sysctl_root), 686 OID_AUTO, 687 "stats", 688 CTLFLAG_RW, 0, 689 "Rack Counters"); 690 rack_badfr = counter_u64_alloc(M_WAITOK); 691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 692 SYSCTL_CHILDREN(rack_counters), 693 OID_AUTO, "badfr", CTLFLAG_RD, 694 &rack_badfr, "Total number of bad FRs"); 695 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 696 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 697 SYSCTL_CHILDREN(rack_counters), 698 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 699 &rack_badfr_bytes, "Total number of bad FRs"); 700 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 701 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 702 SYSCTL_CHILDREN(rack_counters), 703 OID_AUTO, "prrsndret", CTLFLAG_RD, 704 &rack_rtm_prr_retran, 705 "Total number of prr based retransmits"); 706 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 707 
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 708 SYSCTL_CHILDREN(rack_counters), 709 OID_AUTO, "prrsndnew", CTLFLAG_RD, 710 &rack_rtm_prr_newdata, 711 "Total number of prr based new transmits"); 712 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 713 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 714 SYSCTL_CHILDREN(rack_counters), 715 OID_AUTO, "tsnf", CTLFLAG_RD, 716 &rack_timestamp_mismatch, 717 "Total number of timestamps that we could not find the reported ts"); 718 rack_find_high = counter_u64_alloc(M_WAITOK); 719 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 720 SYSCTL_CHILDREN(rack_counters), 721 OID_AUTO, "findhigh", CTLFLAG_RD, 722 &rack_find_high, 723 "Total number of FIN causing find-high"); 724 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 725 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 726 SYSCTL_CHILDREN(rack_counters), 727 OID_AUTO, "reordering", CTLFLAG_RD, 728 &rack_reorder_seen, 729 "Total number of times we added delay due to reordering"); 730 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 732 SYSCTL_CHILDREN(rack_counters), 733 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 734 &rack_tlp_tot, 735 "Total number of tail loss probe expirations"); 736 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 738 SYSCTL_CHILDREN(rack_counters), 739 OID_AUTO, "tlp_new", CTLFLAG_RD, 740 &rack_tlp_newdata, 741 "Total number of tail loss probe sending new data"); 742 743 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 744 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 745 SYSCTL_CHILDREN(rack_counters), 746 OID_AUTO, "tlp_retran", CTLFLAG_RD, 747 &rack_tlp_retran, 748 "Total number of tail loss probe sending retransmitted data"); 749 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 750 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 751 SYSCTL_CHILDREN(rack_counters), 752 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 753 &rack_tlp_retran_bytes, 754 "Total bytes of tail loss probe sending retransmitted data"); 755 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 756 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 757 SYSCTL_CHILDREN(rack_counters), 758 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 759 &rack_tlp_retran_fail, 760 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 761 rack_to_tot = counter_u64_alloc(M_WAITOK); 762 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 763 SYSCTL_CHILDREN(rack_counters), 764 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 765 &rack_to_tot, 766 "Total number of times the rack to expired?"); 767 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 768 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 769 SYSCTL_CHILDREN(rack_counters), 770 OID_AUTO, "arm_rack", CTLFLAG_RD, 771 &rack_to_arm_rack, 772 "Total number of times the rack timer armed?"); 773 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 774 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 775 SYSCTL_CHILDREN(rack_counters), 776 OID_AUTO, "arm_tlp", CTLFLAG_RD, 777 &rack_to_arm_tlp, 778 "Total number of times the tlp timer armed?"); 779 780 rack_calc_zero = counter_u64_alloc(M_WAITOK); 781 rack_calc_nonzero = counter_u64_alloc(M_WAITOK); 782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 783 SYSCTL_CHILDREN(rack_counters), 784 OID_AUTO, "calc_zero", CTLFLAG_RD, 785 &rack_calc_zero, 786 "Total number of times pacing time worked out to zero?"); 787 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 788 SYSCTL_CHILDREN(rack_counters), 789 OID_AUTO, "calc_nonzero", CTLFLAG_RD, 790 &rack_calc_nonzero, 791 "Total number of times pacing time worked out to non-zero?"); 
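	/*
	 * Illustrative sketch only (kept under #if 0, not built): how a
	 * userland tool could read one of the counters registered in this
	 * function and then zero them all through the "clear" handler
	 * (sysctl_rack_clear) defined earlier.  The "net.inet.tcp.rack"
	 * prefix is an assumption here; the actual parent of
	 * rack_sysctl_root is established outside this section.
	 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t tlp_tot;
	size_t len = sizeof(tlp_tot);
	uint32_t one = 1;

	/* Counters added with SYSCTL_ADD_COUNTER_U64 read back as a uint64_t. */
	if (sysctlbyname("net.inet.tcp.rack.stats.tlp_to_total",
	    &tlp_tot, &len, NULL, 0) == 0)
		printf("TLP timeouts: %ju\n", (uintmax_t)tlp_tot);

	/* Writing 1 to the "clear" node runs sysctl_rack_clear(). */
	(void)sysctlbyname("net.inet.tcp.rack.clear", NULL, NULL,
	    &one, sizeof(one));
	return (0);
}
#endif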
792 rack_paced_segments = counter_u64_alloc(M_WAITOK); 793 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 794 SYSCTL_CHILDREN(rack_counters), 795 OID_AUTO, "paced", CTLFLAG_RD, 796 &rack_paced_segments, 797 "Total number of times a segment send caused hptsi"); 798 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 799 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 800 SYSCTL_CHILDREN(rack_counters), 801 OID_AUTO, "unpaced", CTLFLAG_RD, 802 &rack_unpaced_segments, 803 "Total number of times a segment did not cause hptsi"); 804 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 805 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 806 SYSCTL_CHILDREN(rack_counters), 807 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 808 &rack_saw_enobuf, 809 "Total number of times a segment did not cause hptsi"); 810 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 811 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 812 SYSCTL_CHILDREN(rack_counters), 813 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 814 &rack_saw_enetunreach, 815 "Total number of times a segment did not cause hptsi"); 816 rack_to_alloc = counter_u64_alloc(M_WAITOK); 817 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 818 SYSCTL_CHILDREN(rack_counters), 819 OID_AUTO, "allocs", CTLFLAG_RD, 820 &rack_to_alloc, 821 "Total allocations of tracking structures"); 822 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 823 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 824 SYSCTL_CHILDREN(rack_counters), 825 OID_AUTO, "allochard", CTLFLAG_RD, 826 &rack_to_alloc_hard, 827 "Total allocations done with sleeping the hard way"); 828 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 829 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 830 SYSCTL_CHILDREN(rack_counters), 831 OID_AUTO, "allocemerg", CTLFLAG_RD, 832 &rack_to_alloc_emerg, 833 "Total allocations done from emergency cache"); 834 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 835 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 836 SYSCTL_CHILDREN(rack_counters), 837 OID_AUTO, "alloc_limited", CTLFLAG_RD, 838 &rack_to_alloc_limited, 839 "Total allocations dropped due to limit"); 840 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 841 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 842 SYSCTL_CHILDREN(rack_counters), 843 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 844 &rack_alloc_limited_conns, 845 "Connections with allocations dropped due to limit"); 846 rack_split_limited = counter_u64_alloc(M_WAITOK); 847 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 848 SYSCTL_CHILDREN(rack_counters), 849 OID_AUTO, "split_limited", CTLFLAG_RD, 850 &rack_split_limited, 851 "Split allocations dropped due to limit"); 852 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 853 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 854 SYSCTL_CHILDREN(rack_counters), 855 OID_AUTO, "sack_long", CTLFLAG_RD, 856 &rack_sack_proc_all, 857 "Total times we had to walk whole list for sack processing"); 858 859 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 861 SYSCTL_CHILDREN(rack_counters), 862 OID_AUTO, "sack_restart", CTLFLAG_RD, 863 &rack_sack_proc_restart, 864 "Total times we had to walk whole list due to a restart"); 865 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 867 SYSCTL_CHILDREN(rack_counters), 868 OID_AUTO, "sack_short", CTLFLAG_RD, 869 &rack_sack_proc_short, 870 "Total times we took shortcut for sack processing"); 871 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK); 872 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 873 SYSCTL_CHILDREN(rack_counters), 874 OID_AUTO, "tlp_calc_entered", 
CTLFLAG_RD, 875 &rack_enter_tlp_calc, 876 "Total times we called calc-tlp"); 877 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK); 878 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 879 SYSCTL_CHILDREN(rack_counters), 880 OID_AUTO, "hit_tlp_method", CTLFLAG_RD, 881 &rack_used_tlpmethod, 882 "Total number of runt sacks"); 883 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK); 884 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 885 SYSCTL_CHILDREN(rack_counters), 886 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD, 887 &rack_used_tlpmethod2, 888 "Total number of times we hit TLP method 2"); 889 /* Sack Attacker detection stuff */ 890 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 891 SYSCTL_CHILDREN(rack_sysctl_root), 892 OID_AUTO, 893 "sack_attack", 894 CTLFLAG_RW, 0, 895 "Rack Sack Attack Counters and Controls"); 896 SYSCTL_ADD_U32(&rack_sysctl_ctx, 897 SYSCTL_CHILDREN(rack_attack), 898 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 899 &rack_highest_sack_thresh_seen, 0, 900 "Highest sack to ack ratio seen"); 901 SYSCTL_ADD_U32(&rack_sysctl_ctx, 902 SYSCTL_CHILDREN(rack_attack), 903 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 904 &rack_highest_move_thresh_seen, 0, 905 "Highest move to non-move ratio seen"); 906 rack_ack_total = counter_u64_alloc(M_WAITOK); 907 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 908 SYSCTL_CHILDREN(rack_attack), 909 OID_AUTO, "acktotal", CTLFLAG_RD, 910 &rack_ack_total, 911 "Total number of Ack's"); 912 913 rack_express_sack = counter_u64_alloc(M_WAITOK); 914 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 915 SYSCTL_CHILDREN(rack_attack), 916 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 917 &rack_express_sack, 918 "Total expresss number of Sack's"); 919 rack_sack_total = counter_u64_alloc(M_WAITOK); 920 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 921 SYSCTL_CHILDREN(rack_attack), 922 OID_AUTO, "sacktotal", CTLFLAG_RD, 923 &rack_sack_total, 924 "Total number of SACK's"); 925 rack_move_none = counter_u64_alloc(M_WAITOK); 926 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 927 SYSCTL_CHILDREN(rack_attack), 928 OID_AUTO, "move_none", CTLFLAG_RD, 929 &rack_move_none, 930 "Total number of SACK index reuse of postions under threshold"); 931 rack_move_some = counter_u64_alloc(M_WAITOK); 932 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 933 SYSCTL_CHILDREN(rack_attack), 934 OID_AUTO, "move_some", CTLFLAG_RD, 935 &rack_move_some, 936 "Total number of SACK index reuse of postions over threshold"); 937 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 938 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 939 SYSCTL_CHILDREN(rack_attack), 940 OID_AUTO, "attacks", CTLFLAG_RD, 941 &rack_sack_attacks_detected, 942 "Total number of SACK attackers that had sack disabled"); 943 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 944 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 945 SYSCTL_CHILDREN(rack_attack), 946 OID_AUTO, "reversed", CTLFLAG_RD, 947 &rack_sack_attacks_reversed, 948 "Total number of SACK attackers that were later determined false positive"); 949 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 950 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_attack), 952 OID_AUTO, "nextmerge", CTLFLAG_RD, 953 &rack_sack_used_next_merge, 954 "Total number of times we used the next merge"); 955 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 956 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 957 SYSCTL_CHILDREN(rack_attack), 958 OID_AUTO, "prevmerge", CTLFLAG_RD, 959 &rack_sack_used_prev_merge, 960 "Total number of times we used the prev merge"); 961 rack_sack_skipped_acked = 
counter_u64_alloc(M_WAITOK); 962 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 963 SYSCTL_CHILDREN(rack_attack), 964 OID_AUTO, "skipacked", CTLFLAG_RD, 965 &rack_sack_skipped_acked, 966 "Total number of times we skipped previously sacked"); 967 rack_sack_splits = counter_u64_alloc(M_WAITOK); 968 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 969 SYSCTL_CHILDREN(rack_attack), 970 OID_AUTO, "ofsplit", CTLFLAG_RD, 971 &rack_sack_splits, 972 "Total number of times we did the old fashion tree split"); 973 rack_progress_drops = counter_u64_alloc(M_WAITOK); 974 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 975 SYSCTL_CHILDREN(rack_counters), 976 OID_AUTO, "prog_drops", CTLFLAG_RD, 977 &rack_progress_drops, 978 "Total number of progress drops"); 979 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 980 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_counters), 982 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 983 &rack_input_idle_reduces, 984 "Total number of idle reductions on input"); 985 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 986 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 987 SYSCTL_CHILDREN(rack_counters), 988 OID_AUTO, "collapsed_win", CTLFLAG_RD, 989 &rack_collapsed_win, 990 "Total number of collapsed windows"); 991 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK); 992 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 993 SYSCTL_CHILDREN(rack_counters), 994 OID_AUTO, "tlp_nada", CTLFLAG_RD, 995 &rack_tlp_does_nada, 996 "Total number of nada tlp calls"); 997 998 rack_tls_rwnd = counter_u64_alloc(M_WAITOK); 999 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1000 SYSCTL_CHILDREN(rack_counters), 1001 OID_AUTO, "tls_rwnd", CTLFLAG_RD, 1002 &rack_tls_rwnd, 1003 "Total hdwr tls rwnd limited"); 1004 1005 rack_tls_cwnd = counter_u64_alloc(M_WAITOK); 1006 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1007 SYSCTL_CHILDREN(rack_counters), 1008 OID_AUTO, "tls_cwnd", CTLFLAG_RD, 1009 &rack_tls_cwnd, 1010 "Total hdwr tls cwnd limited"); 1011 1012 rack_tls_app = counter_u64_alloc(M_WAITOK); 1013 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1014 SYSCTL_CHILDREN(rack_counters), 1015 OID_AUTO, "tls_app", CTLFLAG_RD, 1016 &rack_tls_app, 1017 "Total hdwr tls app limited"); 1018 1019 rack_tls_other = counter_u64_alloc(M_WAITOK); 1020 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1021 SYSCTL_CHILDREN(rack_counters), 1022 OID_AUTO, "tls_other", CTLFLAG_RD, 1023 &rack_tls_other, 1024 "Total hdwr tls other limited"); 1025 1026 rack_tls_filled = counter_u64_alloc(M_WAITOK); 1027 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_counters), 1029 OID_AUTO, "tls_filled", CTLFLAG_RD, 1030 &rack_tls_filled, 1031 "Total hdwr tls filled"); 1032 1033 rack_tls_rxt = counter_u64_alloc(M_WAITOK); 1034 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1035 SYSCTL_CHILDREN(rack_counters), 1036 OID_AUTO, "tls_rxt", CTLFLAG_RD, 1037 &rack_tls_rxt, 1038 "Total hdwr rxt"); 1039 1040 rack_tls_tlp = counter_u64_alloc(M_WAITOK); 1041 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1042 SYSCTL_CHILDREN(rack_counters), 1043 OID_AUTO, "tls_tlp", CTLFLAG_RD, 1044 &rack_tls_tlp, 1045 "Total hdwr tls tlp"); 1046 rack_per_timer_hole = counter_u64_alloc(M_WAITOK); 1047 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1048 SYSCTL_CHILDREN(rack_counters), 1049 OID_AUTO, "timer_hole", CTLFLAG_RD, 1050 &rack_per_timer_hole, 1051 "Total persists start in timer hole"); 1052 1053 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1054 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1055 OID_AUTO, "outsize", CTLFLAG_RD, 1056 
	    rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes");
	COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK);
	SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "opts", CTLFLAG_RD,
	    rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats");
	SYSCTL_ADD_PROC(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters");
}

static __inline int
rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
{
	if (SEQ_GEQ(b->r_start, a->r_start) &&
	    SEQ_LT(b->r_start, a->r_end)) {
		/*
		 * The entry b is within the
		 * block a. i.e.:
		 *  a --   |-------------|
		 *  b --   |----|
		 *  <or>
		 *  b --       |------|
		 *  <or>
		 *  b --       |-----------|
		 */
		return (0);
	} else if (SEQ_GEQ(b->r_start, a->r_end)) {
		/*
		 * b falls as the next
		 * sequence block after a, so a
		 * is said to be smaller than b.
		 * i.e:
		 *  a --   |------|
		 *  b --          |--------|
		 *  or
		 *  b --              |-----|
		 */
		return (1);
	}
	/*
	 * What's left is where a is
	 * larger than b. i.e:
	 *  a --      |-------|
	 *  b --  |---|
	 * or even possibly
	 *  b --  |--------------|
	 */
	return (-1);
}

RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);
RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp);

static inline int32_t
rack_progress_timeout_check(struct tcpcb *tp)
{
	if (tp->t_maxunacktime && tp->t_acktime && TSTMP_GT(ticks, tp->t_acktime)) {
		if ((ticks - tp->t_acktime) >= tp->t_maxunacktime) {
			/*
			 * There is an assumption that the caller
			 * will drop the connection so we will
			 * increment the counters here.
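			 * (As a rough example of the check above, assuming
			 * hz = 1000 so that ticks advance once per
			 * millisecond: with t_maxunacktime set to 30 * hz,
			 * the connection is considered to have made no
			 * progress, and is dropped, once 30 seconds of
			 * ticks pass without t_acktime moving forward.)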
1119 */ 1120 struct tcp_rack *rack; 1121 rack = (struct tcp_rack *)tp->t_fb_ptr; 1122 counter_u64_add(rack_progress_drops, 1); 1123 #ifdef NETFLIX_STATS 1124 TCPSTAT_INC(tcps_progdrops); 1125 #endif 1126 rack_log_progress_event(rack, tp, ticks, PROGRESS_DROP, __LINE__); 1127 return (1); 1128 } 1129 } 1130 return (0); 1131 } 1132 1133 1134 1135 static void 1136 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 1137 { 1138 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1139 union tcp_log_stackspecific log; 1140 struct timeval tv; 1141 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1142 log.u_bbr.flex1 = tsused; 1143 log.u_bbr.flex2 = thresh; 1144 log.u_bbr.flex3 = rsm->r_flags; 1145 log.u_bbr.flex4 = rsm->r_dupack; 1146 log.u_bbr.flex5 = rsm->r_start; 1147 log.u_bbr.flex6 = rsm->r_end; 1148 log.u_bbr.flex8 = mod; 1149 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1150 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1151 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1152 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1153 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1154 &rack->rc_inp->inp_socket->so_rcv, 1155 &rack->rc_inp->inp_socket->so_snd, 1156 BBR_LOG_SETTINGS_CHG, 0, 1157 0, &log, false, &tv); 1158 } 1159 } 1160 1161 1162 1163 static void 1164 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 1165 { 1166 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1167 union tcp_log_stackspecific log; 1168 struct timeval tv; 1169 1170 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1171 log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT); 1172 log.u_bbr.flex2 = to; 1173 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 1174 log.u_bbr.flex4 = slot; 1175 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 1176 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 1177 log.u_bbr.flex7 = rack->rc_in_persist; 1178 log.u_bbr.flex8 = which; 1179 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 1180 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1181 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1182 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1183 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1184 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1185 &rack->rc_inp->inp_socket->so_rcv, 1186 &rack->rc_inp->inp_socket->so_snd, 1187 BBR_LOG_TIMERSTAR, 0, 1188 0, &log, false, &tv); 1189 } 1190 } 1191 1192 static void 1193 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, int no) 1194 { 1195 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1196 union tcp_log_stackspecific log; 1197 struct timeval tv; 1198 1199 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1200 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1201 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1202 log.u_bbr.flex8 = to_num; 1203 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 1204 log.u_bbr.flex2 = rack->rc_rack_rtt; 1205 log.u_bbr.flex3 = no; 1206 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1207 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1208 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1209 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1210 &rack->rc_inp->inp_socket->so_rcv, 1211 &rack->rc_inp->inp_socket->so_snd, 1212 BBR_LOG_RTO, 0, 1213 0, &log, false, &tv); 1214 } 1215 } 1216 1217 static void 1218 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t, 1219 uint32_t o_srtt, uint32_t o_var) 1220 { 1221 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 1222 union tcp_log_stackspecific log; 
1223 struct timeval tv; 1224 1225 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1226 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1227 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1228 log.u_bbr.flex1 = t; 1229 log.u_bbr.flex2 = o_srtt; 1230 log.u_bbr.flex3 = o_var; 1231 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 1232 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 1233 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt; 1234 log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot; 1235 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 1236 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 1237 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1238 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1239 TCP_LOG_EVENTP(tp, NULL, 1240 &rack->rc_inp->inp_socket->so_rcv, 1241 &rack->rc_inp->inp_socket->so_snd, 1242 BBR_LOG_BBRRTT, 0, 1243 0, &log, false, &tv); 1244 } 1245 } 1246 1247 static void 1248 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 1249 { 1250 /* 1251 * Log the rtt sample we are 1252 * applying to the srtt algorithm in 1253 * useconds. 1254 */ 1255 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1256 union tcp_log_stackspecific log; 1257 struct timeval tv; 1258 1259 /* Convert our ms to a microsecond */ 1260 memset(&log, 0, sizeof(log)); 1261 log.u_bbr.flex1 = rtt * 1000; 1262 log.u_bbr.flex2 = rack->r_ctl.ack_count; 1263 log.u_bbr.flex3 = rack->r_ctl.sack_count; 1264 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 1265 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 1266 log.u_bbr.flex8 = rack->sack_attack_disable; 1267 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1268 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1269 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1270 &rack->rc_inp->inp_socket->so_rcv, 1271 &rack->rc_inp->inp_socket->so_snd, 1272 TCP_LOG_RTT, 0, 1273 0, &log, false, &tv); 1274 } 1275 } 1276 1277 1278 static inline void 1279 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 1280 { 1281 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 1282 union tcp_log_stackspecific log; 1283 struct timeval tv; 1284 1285 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1286 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1287 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1288 log.u_bbr.flex1 = line; 1289 log.u_bbr.flex2 = tick; 1290 log.u_bbr.flex3 = tp->t_maxunacktime; 1291 log.u_bbr.flex4 = tp->t_acktime; 1292 log.u_bbr.flex8 = event; 1293 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1294 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1295 TCP_LOG_EVENTP(tp, NULL, 1296 &rack->rc_inp->inp_socket->so_rcv, 1297 &rack->rc_inp->inp_socket->so_snd, 1298 BBR_LOG_PROGRESS, 0, 1299 0, &log, false, &tv); 1300 } 1301 } 1302 1303 static void 1304 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts) 1305 { 1306 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1307 union tcp_log_stackspecific log; 1308 struct timeval tv; 1309 1310 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1311 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1312 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1313 log.u_bbr.flex1 = slot; 1314 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 1315 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 1316 log.u_bbr.flex8 = rack->rc_in_persist; 1317 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1318 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1319 TCP_LOG_EVENTP(rack->rc_tp, 
NULL, 1320 &rack->rc_inp->inp_socket->so_rcv, 1321 &rack->rc_inp->inp_socket->so_snd, 1322 BBR_LOG_BBRSND, 0, 1323 0, &log, false, &tv); 1324 } 1325 } 1326 1327 static void 1328 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out) 1329 { 1330 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1331 union tcp_log_stackspecific log; 1332 struct timeval tv; 1333 1334 memset(&log, 0, sizeof(log)); 1335 log.u_bbr.flex1 = did_out; 1336 log.u_bbr.flex2 = nxt_pkt; 1337 log.u_bbr.flex3 = way_out; 1338 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 1339 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1340 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 1341 log.u_bbr.flex7 = rack->r_wanted_output; 1342 log.u_bbr.flex8 = rack->rc_in_persist; 1343 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1344 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1345 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1346 &rack->rc_inp->inp_socket->so_rcv, 1347 &rack->rc_inp->inp_socket->so_snd, 1348 BBR_LOG_DOSEG_DONE, 0, 1349 0, &log, false, &tv); 1350 } 1351 } 1352 1353 static void 1354 rack_log_type_hrdwtso(struct tcpcb *tp, struct tcp_rack *rack, int len, int mod, int32_t orig_len, int frm) 1355 { 1356 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 1357 union tcp_log_stackspecific log; 1358 struct timeval tv; 1359 uint32_t cts; 1360 1361 memset(&log, 0, sizeof(log)); 1362 cts = tcp_get_usecs(&tv); 1363 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 1364 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 1365 log.u_bbr.flex4 = len; 1366 log.u_bbr.flex5 = orig_len; 1367 log.u_bbr.flex6 = rack->r_ctl.rc_sacked; 1368 log.u_bbr.flex7 = mod; 1369 log.u_bbr.flex8 = frm; 1370 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1371 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1372 TCP_LOG_EVENTP(tp, NULL, 1373 &tp->t_inpcb->inp_socket->so_rcv, 1374 &tp->t_inpcb->inp_socket->so_snd, 1375 TCP_HDWR_TLS, 0, 1376 0, &log, false, &tv); 1377 } 1378 } 1379 1380 static void 1381 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling) 1382 { 1383 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1384 union tcp_log_stackspecific log; 1385 struct timeval tv; 1386 1387 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1388 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1389 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1390 log.u_bbr.flex1 = slot; 1391 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 1392 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1393 log.u_bbr.flex7 = hpts_calling; 1394 log.u_bbr.flex8 = rack->rc_in_persist; 1395 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1396 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1397 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1398 &rack->rc_inp->inp_socket->so_rcv, 1399 &rack->rc_inp->inp_socket->so_snd, 1400 BBR_LOG_JUSTRET, 0, 1401 tlen, &log, false, &tv); 1402 } 1403 } 1404 1405 static void 1406 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line) 1407 { 1408 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1409 union tcp_log_stackspecific log; 1410 struct timeval tv; 1411 1412 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1413 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1414 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1415 log.u_bbr.flex1 = line; 1416 log.u_bbr.flex2 = 0; 1417 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 1418 log.u_bbr.flex4 = 0; 1419 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1420 log.u_bbr.flex6 = 
rack->rc_tp->t_rxtcur; 1421 log.u_bbr.flex8 = hpts_removed; 1422 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1423 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1424 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1425 &rack->rc_inp->inp_socket->so_rcv, 1426 &rack->rc_inp->inp_socket->so_snd, 1427 BBR_LOG_TIMERCANC, 0, 1428 0, &log, false, &tv); 1429 } 1430 } 1431 1432 static void 1433 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 1434 { 1435 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1436 union tcp_log_stackspecific log; 1437 struct timeval tv; 1438 1439 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1440 log.u_bbr.flex1 = timers; 1441 log.u_bbr.flex2 = ret; 1442 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 1443 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 1444 log.u_bbr.flex5 = cts; 1445 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 1446 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1447 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1448 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1449 &rack->rc_inp->inp_socket->so_rcv, 1450 &rack->rc_inp->inp_socket->so_snd, 1451 BBR_LOG_TO_PROCESS, 0, 1452 0, &log, false, &tv); 1453 } 1454 } 1455 1456 static void 1457 rack_log_to_prr(struct tcp_rack *rack, int frm) 1458 { 1459 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1460 union tcp_log_stackspecific log; 1461 struct timeval tv; 1462 1463 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1464 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 1465 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 1466 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 1467 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 1468 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 1469 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 1470 log.u_bbr.flex8 = frm; 1471 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1472 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1473 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1474 &rack->rc_inp->inp_socket->so_rcv, 1475 &rack->rc_inp->inp_socket->so_snd, 1476 BBR_LOG_BBRUPD, 0, 1477 0, &log, false, &tv); 1478 } 1479 } 1480 1481 #ifdef NETFLIX_EXP_DETECTION 1482 static void 1483 rack_log_sad(struct tcp_rack *rack, int event) 1484 { 1485 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1486 union tcp_log_stackspecific log; 1487 struct timeval tv; 1488 1489 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1490 log.u_bbr.flex1 = rack->r_ctl.sack_count; 1491 log.u_bbr.flex2 = rack->r_ctl.ack_count; 1492 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 1493 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 1494 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 1495 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 1496 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 1497 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 1498 log.u_bbr.lt_epoch |= rack->do_detection; 1499 log.u_bbr.applimited = tcp_map_minimum; 1500 log.u_bbr.flex7 = rack->sack_attack_disable; 1501 log.u_bbr.flex8 = event; 1502 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1503 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1504 log.u_bbr.delivered = tcp_sad_decay_val; 1505 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1506 &rack->rc_inp->inp_socket->so_rcv, 1507 &rack->rc_inp->inp_socket->so_snd, 1508 TCP_SAD_DETECTION, 0, 1509 0, &log, false, &tv); 1510 } 1511 } 1512 #endif 1513 1514 static void 1515 rack_counter_destroy(void) 1516 { 1517 counter_u64_free(rack_badfr); 1518 counter_u64_free(rack_badfr_bytes); 1519 counter_u64_free(rack_rtm_prr_retran); 1520 
counter_u64_free(rack_rtm_prr_newdata); 1521 counter_u64_free(rack_timestamp_mismatch); 1522 counter_u64_free(rack_reorder_seen); 1523 counter_u64_free(rack_tlp_tot); 1524 counter_u64_free(rack_tlp_newdata); 1525 counter_u64_free(rack_tlp_retran); 1526 counter_u64_free(rack_tlp_retran_bytes); 1527 counter_u64_free(rack_tlp_retran_fail); 1528 counter_u64_free(rack_to_tot); 1529 counter_u64_free(rack_to_arm_rack); 1530 counter_u64_free(rack_to_arm_tlp); 1531 counter_u64_free(rack_paced_segments); 1532 counter_u64_free(rack_unpaced_segments); 1533 counter_u64_free(rack_saw_enobuf); 1534 counter_u64_free(rack_saw_enetunreach); 1535 counter_u64_free(rack_to_alloc_hard); 1536 counter_u64_free(rack_to_alloc_emerg); 1537 counter_u64_free(rack_sack_proc_all); 1538 counter_u64_free(rack_sack_proc_short); 1539 counter_u64_free(rack_sack_proc_restart); 1540 counter_u64_free(rack_to_alloc); 1541 counter_u64_free(rack_to_alloc_limited); 1542 counter_u64_free(rack_alloc_limited_conns); 1543 counter_u64_free(rack_split_limited); 1544 counter_u64_free(rack_find_high); 1545 counter_u64_free(rack_enter_tlp_calc); 1546 counter_u64_free(rack_used_tlpmethod); 1547 counter_u64_free(rack_used_tlpmethod2); 1548 counter_u64_free(rack_progress_drops); 1549 counter_u64_free(rack_input_idle_reduces); 1550 counter_u64_free(rack_collapsed_win); 1551 counter_u64_free(rack_tlp_does_nada); 1552 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 1553 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 1554 } 1555 1556 static struct rack_sendmap * 1557 rack_alloc(struct tcp_rack *rack) 1558 { 1559 struct rack_sendmap *rsm; 1560 1561 rsm = uma_zalloc(rack_zone, M_NOWAIT); 1562 if (rsm) { 1563 rack->r_ctl.rc_num_maps_alloced++; 1564 counter_u64_add(rack_to_alloc, 1); 1565 return (rsm); 1566 } 1567 if (rack->rc_free_cnt) { 1568 counter_u64_add(rack_to_alloc_emerg, 1); 1569 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 1570 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 1571 rack->rc_free_cnt--; 1572 return (rsm); 1573 } 1574 return (NULL); 1575 } 1576 1577 static struct rack_sendmap * 1578 rack_alloc_full_limit(struct tcp_rack *rack) 1579 { 1580 if ((rack_tcp_map_entries_limit > 0) && 1581 (rack->do_detection == 0) && 1582 (rack->r_ctl.rc_num_maps_alloced >= rack_tcp_map_entries_limit)) { 1583 counter_u64_add(rack_to_alloc_limited, 1); 1584 if (!rack->alloc_limit_reported) { 1585 rack->alloc_limit_reported = 1; 1586 counter_u64_add(rack_alloc_limited_conns, 1); 1587 } 1588 return (NULL); 1589 } 1590 return (rack_alloc(rack)); 1591 } 1592 1593 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 1594 static struct rack_sendmap * 1595 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 1596 { 1597 struct rack_sendmap *rsm; 1598 1599 if (limit_type) { 1600 /* currently there is only one limit type */ 1601 if (rack_tcp_map_split_limit > 0 && 1602 (rack->do_detection == 0) && 1603 rack->r_ctl.rc_num_split_allocs >= rack_tcp_map_split_limit) { 1604 counter_u64_add(rack_split_limited, 1); 1605 if (!rack->alloc_limit_reported) { 1606 rack->alloc_limit_reported = 1; 1607 counter_u64_add(rack_alloc_limited_conns, 1); 1608 } 1609 return (NULL); 1610 } 1611 } 1612 1613 /* allocate and mark in the limit type, if set */ 1614 rsm = rack_alloc(rack); 1615 if (rsm != NULL && limit_type) { 1616 rsm->r_limit_type = limit_type; 1617 rack->r_ctl.rc_num_split_allocs++; 1618 } 1619 return (rsm); 1620 } 1621 1622 static void 1623 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 1624 { 1625 if (rsm->r_limit_type) { 
1626 /* currently there is only one limit type */ 1627 rack->r_ctl.rc_num_split_allocs--; 1628 } 1629 if (rack->r_ctl.rc_tlpsend == rsm) 1630 rack->r_ctl.rc_tlpsend = NULL; 1631 if (rack->r_ctl.rc_sacklast == rsm) 1632 rack->r_ctl.rc_sacklast = NULL; 1633 if (rack->rc_free_cnt < rack_free_cache) { 1634 memset(rsm, 0, sizeof(struct rack_sendmap)); 1635 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 1636 rsm->r_limit_type = 0; 1637 rack->rc_free_cnt++; 1638 return; 1639 } 1640 rack->r_ctl.rc_num_maps_alloced--; 1641 uma_zfree(rack_zone, rsm); 1642 } 1643 1644 /* 1645 * CC wrapper hook functions 1646 */ 1647 static void 1648 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs, 1649 uint16_t type, int32_t recovery) 1650 { 1651 #ifdef NETFLIX_STATS 1652 int32_t gput; 1653 #endif 1654 1655 INP_WLOCK_ASSERT(tp->t_inpcb); 1656 tp->ccv->nsegs = nsegs; 1657 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); 1658 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 1659 uint32_t max; 1660 1661 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 1662 if (tp->ccv->bytes_this_ack > max) { 1663 tp->ccv->bytes_this_ack = max; 1664 } 1665 } 1666 if ((!V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd)) || 1667 (V_tcp_do_newcwv && (tp->snd_cwnd <= tp->snd_wnd) && 1668 (tp->snd_cwnd < (ctf_flight_size(tp, rack->r_ctl.rc_sacked) * 2)))) 1669 tp->ccv->flags |= CCF_CWND_LIMITED; 1670 else 1671 tp->ccv->flags &= ~CCF_CWND_LIMITED; 1672 1673 if (type == CC_ACK) { 1674 #ifdef NETFLIX_STATS 1675 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 1676 ((int32_t) tp->snd_cwnd) - tp->snd_wnd); 1677 if ((tp->t_flags & TF_GPUTINPROG) && 1678 SEQ_GEQ(th->th_ack, tp->gput_ack)) { 1679 gput = (((int64_t) (th->th_ack - tp->gput_seq)) << 3) / 1680 max(1, tcp_ts_getticks() - tp->gput_ts); 1681 /* We store it in bytes per ms (or kbytes per sec) */ 1682 rack->r_ctl.rc_gp_history[rack->r_ctl.rc_gp_hist_idx] = gput / 8; 1683 rack->r_ctl.rc_gp_hist_idx++; 1684 if (rack->r_ctl.rc_gp_hist_idx >= RACK_GP_HIST) 1685 rack->r_ctl.rc_gp_hist_filled = 1; 1686 rack->r_ctl.rc_gp_hist_idx %= RACK_GP_HIST; 1687 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 1688 gput); 1689 /* 1690 * XXXLAS: This is a temporary hack, and should be 1691 * chained off VOI_TCP_GPUT when stats(9) grows an 1692 * API to deal with chained VOIs. 1693 */ 1694 if (tp->t_stats_gput_prev > 0) 1695 stats_voi_update_abs_s32(tp->t_stats, 1696 VOI_TCP_GPUT_ND, 1697 ((gput - tp->t_stats_gput_prev) * 100) / 1698 tp->t_stats_gput_prev); 1699 tp->t_flags &= ~TF_GPUTINPROG; 1700 tp->t_stats_gput_prev = gput; 1701 1702 if (tp->t_maxpeakrate) { 1703 /* 1704 * We update t_peakrate_thr. This gives us roughly 1705 * one update per round trip time. 
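 *
 * A rough sketch of the effect (illustrative only, mirroring the
 * clamp applied near the end of this function): the refreshed
 * threshold is later used as a hard cap on the congestion window,
 *
 *	if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr)
 *		tp->snd_cwnd = tp->t_peakrate_thr;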
1706 */ 1707 tcp_update_peakrate_thr(tp); 1708 } 1709 } 1710 #endif 1711 if (tp->snd_cwnd > tp->snd_ssthresh) { 1712 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 1713 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 1714 if (tp->t_bytes_acked >= tp->snd_cwnd) { 1715 tp->t_bytes_acked -= tp->snd_cwnd; 1716 tp->ccv->flags |= CCF_ABC_SENTAWND; 1717 } 1718 } else { 1719 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 1720 tp->t_bytes_acked = 0; 1721 } 1722 } 1723 if (CC_ALGO(tp)->ack_received != NULL) { 1724 /* XXXLAS: Find a way to live without this */ 1725 tp->ccv->curack = th->th_ack; 1726 CC_ALGO(tp)->ack_received(tp->ccv, type); 1727 } 1728 #ifdef NETFLIX_STATS 1729 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd); 1730 #endif 1731 if (rack->r_ctl.rc_rack_largest_cwnd < tp->snd_cwnd) { 1732 rack->r_ctl.rc_rack_largest_cwnd = tp->snd_cwnd; 1733 } 1734 /* we enforce max peak rate if it is set. */ 1735 if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr) { 1736 tp->snd_cwnd = tp->t_peakrate_thr; 1737 } 1738 } 1739 1740 static void 1741 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th) 1742 { 1743 struct tcp_rack *rack; 1744 1745 rack = (struct tcp_rack *)tp->t_fb_ptr; 1746 INP_WLOCK_ASSERT(tp->t_inpcb); 1747 if (rack->r_ctl.rc_prr_sndcnt > 0) 1748 rack->r_wanted_output++; 1749 } 1750 1751 static void 1752 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th) 1753 { 1754 struct tcp_rack *rack; 1755 1756 INP_WLOCK_ASSERT(tp->t_inpcb); 1757 rack = (struct tcp_rack *)tp->t_fb_ptr; 1758 if (CC_ALGO(tp)->post_recovery != NULL) { 1759 tp->ccv->curack = th->th_ack; 1760 CC_ALGO(tp)->post_recovery(tp->ccv); 1761 } 1762 /* 1763 * Here we can in theory adjust cwnd to be based on the number of 1764 * losses in the window (rack->r_ctl.rc_loss_count). This is done 1765 * based on the rack_use_proportional flag. 
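 *
 * A sketch of the proportional path taken below (illustrative
 * only): the reduction is the loss count scaled by rc_prop_rate,
 * capped at 50 percent of cwnd,
 *
 *	reduce = min(rc_loss_count * rc_prop_rate, 50);
 *	tp->snd_cwnd -= (reduce * tp->snd_cwnd) / 100;
 *
 * otherwise cwnd is simply dropped to ssthresh.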
1766 */ 1767 if (rack->r_ctl.rc_prop_reduce && rack->r_ctl.rc_prop_rate) { 1768 int32_t reduce; 1769 1770 reduce = (rack->r_ctl.rc_loss_count * rack->r_ctl.rc_prop_rate); 1771 if (reduce > 50) { 1772 reduce = 50; 1773 } 1774 tp->snd_cwnd -= ((reduce * tp->snd_cwnd) / 100); 1775 } else { 1776 if (tp->snd_cwnd > tp->snd_ssthresh) { 1777 /* Drop us down to the ssthresh (1/2 cwnd at loss) */ 1778 tp->snd_cwnd = tp->snd_ssthresh; 1779 } 1780 } 1781 if (rack->r_ctl.rc_prr_sndcnt > 0) { 1782 /* Suck the next prr cnt back into cwnd */ 1783 tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt; 1784 rack->r_ctl.rc_prr_sndcnt = 0; 1785 rack_log_to_prr(rack, 1); 1786 } 1787 tp->snd_recover = tp->snd_una; 1788 EXIT_RECOVERY(tp->t_flags); 1789 1790 1791 } 1792 1793 static void 1794 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) 1795 { 1796 struct tcp_rack *rack; 1797 1798 INP_WLOCK_ASSERT(tp->t_inpcb); 1799 1800 rack = (struct tcp_rack *)tp->t_fb_ptr; 1801 switch (type) { 1802 case CC_NDUPACK: 1803 tp->t_flags &= ~TF_WASFRECOVERY; 1804 tp->t_flags &= ~TF_WASCRECOVERY; 1805 if (!IN_FASTRECOVERY(tp->t_flags)) { 1806 rack->r_ctl.rc_tlp_rtx_out = 0; 1807 rack->r_ctl.rc_prr_delivered = 0; 1808 rack->r_ctl.rc_prr_out = 0; 1809 rack->r_ctl.rc_loss_count = 0; 1810 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 1811 rack_log_to_prr(rack, 2); 1812 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 1813 tp->snd_recover = tp->snd_max; 1814 if (tp->t_flags2 & TF2_ECN_PERMIT) 1815 tp->t_flags2 |= TF2_ECN_SND_CWR; 1816 } 1817 break; 1818 case CC_ECN: 1819 if (!IN_CONGRECOVERY(tp->t_flags)) { 1820 TCPSTAT_INC(tcps_ecn_rcwnd); 1821 tp->snd_recover = tp->snd_max; 1822 if (tp->t_flags2 & TF2_ECN_PERMIT) 1823 tp->t_flags2 |= TF2_ECN_SND_CWR; 1824 } 1825 break; 1826 case CC_RTO: 1827 tp->t_dupacks = 0; 1828 tp->t_bytes_acked = 0; 1829 EXIT_RECOVERY(tp->t_flags); 1830 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 / 1831 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 1832 tp->snd_cwnd = ctf_fixed_maxseg(tp); 1833 break; 1834 case CC_RTO_ERR: 1835 TCPSTAT_INC(tcps_sndrexmitbad); 1836 /* RTO was unnecessary, so reset everything. */ 1837 tp->snd_cwnd = tp->snd_cwnd_prev; 1838 tp->snd_ssthresh = tp->snd_ssthresh_prev; 1839 tp->snd_recover = tp->snd_recover_prev; 1840 if (tp->t_flags & TF_WASFRECOVERY) { 1841 ENTER_FASTRECOVERY(tp->t_flags); 1842 tp->t_flags &= ~TF_WASFRECOVERY; 1843 } 1844 if (tp->t_flags & TF_WASCRECOVERY) { 1845 ENTER_CONGRECOVERY(tp->t_flags); 1846 tp->t_flags &= ~TF_WASCRECOVERY; 1847 } 1848 tp->snd_nxt = tp->snd_max; 1849 tp->t_badrxtwin = 0; 1850 break; 1851 } 1852 1853 if (CC_ALGO(tp)->cong_signal != NULL) { 1854 if (th != NULL) 1855 tp->ccv->curack = th->th_ack; 1856 CC_ALGO(tp)->cong_signal(tp->ccv, type); 1857 } 1858 } 1859 1860 1861 1862 static inline void 1863 rack_cc_after_idle(struct tcpcb *tp) 1864 { 1865 uint32_t i_cwnd; 1866 1867 INP_WLOCK_ASSERT(tp->t_inpcb); 1868 1869 #ifdef NETFLIX_STATS 1870 TCPSTAT_INC(tcps_idle_restarts); 1871 if (tp->t_state == TCPS_ESTABLISHED) 1872 TCPSTAT_INC(tcps_idle_estrestarts); 1873 #endif 1874 if (CC_ALGO(tp)->after_idle != NULL) 1875 CC_ALGO(tp)->after_idle(tp->ccv); 1876 1877 if (tp->snd_cwnd == 1) 1878 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 1879 else 1880 i_cwnd = tcp_compute_initwnd(tcp_maxseg(tp)); 1881 1882 /* 1883 * Being idle is no differnt than the initial window. If the cc 1884 * clamps it down below the initial window raise it to the initial 1885 * window. 
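 *
 * In short, a sketch of the logic in this function (illustrative
 * only):
 *
 *	i_cwnd = (tp->snd_cwnd == 1) ? tp->t_maxseg :
 *	    tcp_compute_initwnd(tcp_maxseg(tp));
 *	tp->snd_cwnd = max(tp->snd_cwnd, i_cwnd);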
1886 */ 1887 if (tp->snd_cwnd < i_cwnd) { 1888 tp->snd_cwnd = i_cwnd; 1889 } 1890 } 1891 1892 1893 /* 1894 * Indicate whether this ack should be delayed. We can delay the ack if 1895 * following conditions are met: 1896 * - There is no delayed ack timer in progress. 1897 * - Our last ack wasn't a 0-sized window. We never want to delay 1898 * the ack that opens up a 0-sized window. 1899 * - LRO wasn't used for this segment. We make sure by checking that the 1900 * segment size is not larger than the MSS. 1901 * - Delayed acks are enabled or this is a half-synchronized T/TCP 1902 * connection. 1903 */ 1904 #define DELAY_ACK(tp, tlen) \ 1905 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 1906 ((tp->t_flags & TF_DELACK) == 0) && \ 1907 (tlen <= tp->t_maxseg) && \ 1908 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 1909 1910 static struct rack_sendmap * 1911 rack_find_lowest_rsm(struct tcp_rack *rack) 1912 { 1913 struct rack_sendmap *rsm; 1914 1915 /* 1916 * Walk the time-order transmitted list looking for an rsm that is 1917 * not acked. This will be the one that was sent the longest time 1918 * ago that is still outstanding. 1919 */ 1920 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 1921 if (rsm->r_flags & RACK_ACKED) { 1922 continue; 1923 } 1924 goto finish; 1925 } 1926 finish: 1927 return (rsm); 1928 } 1929 1930 static struct rack_sendmap * 1931 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 1932 { 1933 struct rack_sendmap *prsm; 1934 1935 /* 1936 * Walk the sequence order list backward until we hit and arrive at 1937 * the highest seq not acked. In theory when this is called it 1938 * should be the last segment (which it was not). 1939 */ 1940 counter_u64_add(rack_find_high, 1); 1941 prsm = rsm; 1942 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 1943 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 1944 continue; 1945 } 1946 return (prsm); 1947 } 1948 return (NULL); 1949 } 1950 1951 1952 static uint32_t 1953 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 1954 { 1955 int32_t lro; 1956 uint32_t thresh; 1957 1958 /* 1959 * lro is the flag we use to determine if we have seen reordering. 1960 * If it gets set we have seen reordering. The reorder logic either 1961 * works in one of two ways: 1962 * 1963 * If reorder-fade is configured, then we track the last time we saw 1964 * re-ordering occur. If we reach the point where enough time as 1965 * passed we no longer consider reordering has occuring. 1966 * 1967 * Or if reorder-face is 0, then once we see reordering we consider 1968 * the connection to alway be subject to reordering and just set lro 1969 * to 1. 1970 * 1971 * In the end if lro is non-zero we add the extra time for 1972 * reordering in. 1973 */ 1974 if (srtt == 0) 1975 srtt = 1; 1976 if (rack->r_ctl.rc_reorder_ts) { 1977 if (rack->r_ctl.rc_reorder_fade) { 1978 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 1979 lro = cts - rack->r_ctl.rc_reorder_ts; 1980 if (lro == 0) { 1981 /* 1982 * No time as passed since the last 1983 * reorder, mark it as reordering. 1984 */ 1985 lro = 1; 1986 } 1987 } else { 1988 /* Negative time? 
*/ 1989 lro = 0; 1990 } 1991 if (lro > rack->r_ctl.rc_reorder_fade) { 1992 /* Turn off reordering seen too */ 1993 rack->r_ctl.rc_reorder_ts = 0; 1994 lro = 0; 1995 } 1996 } else { 1997 /* Reodering does not fade */ 1998 lro = 1; 1999 } 2000 } else { 2001 lro = 0; 2002 } 2003 thresh = srtt + rack->r_ctl.rc_pkt_delay; 2004 if (lro) { 2005 /* It must be set, if not you get 1/4 rtt */ 2006 if (rack->r_ctl.rc_reorder_shift) 2007 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 2008 else 2009 thresh += (srtt >> 2); 2010 } else { 2011 thresh += 1; 2012 } 2013 /* We don't let the rack timeout be above a RTO */ 2014 if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) { 2015 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur); 2016 } 2017 /* And we don't want it above the RTO max either */ 2018 if (thresh > rack_rto_max) { 2019 thresh = rack_rto_max; 2020 } 2021 return (thresh); 2022 } 2023 2024 static uint32_t 2025 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 2026 struct rack_sendmap *rsm, uint32_t srtt) 2027 { 2028 struct rack_sendmap *prsm; 2029 uint32_t thresh, len; 2030 int maxseg; 2031 2032 if (srtt == 0) 2033 srtt = 1; 2034 if (rack->r_ctl.rc_tlp_threshold) 2035 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 2036 else 2037 thresh = (srtt * 2); 2038 2039 /* Get the previous sent packet, if any */ 2040 maxseg = ctf_fixed_maxseg(tp); 2041 counter_u64_add(rack_enter_tlp_calc, 1); 2042 len = rsm->r_end - rsm->r_start; 2043 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 2044 /* Exactly like the ID */ 2045 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= maxseg) { 2046 uint32_t alt_thresh; 2047 /* 2048 * Compensate for delayed-ack with the d-ack time. 2049 */ 2050 counter_u64_add(rack_used_tlpmethod, 1); 2051 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2052 if (alt_thresh > thresh) 2053 thresh = alt_thresh; 2054 } 2055 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 2056 /* 2.1 behavior */ 2057 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 2058 if (prsm && (len <= maxseg)) { 2059 /* 2060 * Two packets outstanding, thresh should be (2*srtt) + 2061 * possible inter-packet delay (if any). 2062 */ 2063 uint32_t inter_gap = 0; 2064 int idx, nidx; 2065 2066 counter_u64_add(rack_used_tlpmethod, 1); 2067 idx = rsm->r_rtr_cnt - 1; 2068 nidx = prsm->r_rtr_cnt - 1; 2069 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) { 2070 /* Yes it was sent later (or at the same time) */ 2071 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 2072 } 2073 thresh += inter_gap; 2074 } else if (len <= maxseg) { 2075 /* 2076 * Possibly compensate for delayed-ack. 2077 */ 2078 uint32_t alt_thresh; 2079 2080 counter_u64_add(rack_used_tlpmethod2, 1); 2081 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2082 if (alt_thresh > thresh) 2083 thresh = alt_thresh; 2084 } 2085 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 2086 /* 2.2 behavior */ 2087 if (len <= maxseg) { 2088 uint32_t alt_thresh; 2089 /* 2090 * Compensate for delayed-ack with the d-ack time. 
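 *
 * That is, a sketch of the delayed-ack compensation used here and
 * in the similar cases above (illustrative only):
 *
 *	alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
 *	thresh = max(thresh, alt_thresh);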
2091 */ 2092 counter_u64_add(rack_used_tlpmethod, 1); 2093 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2094 if (alt_thresh > thresh) 2095 thresh = alt_thresh; 2096 } 2097 } 2098 /* Not above an RTO */ 2099 if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) { 2100 thresh = TICKS_2_MSEC(tp->t_rxtcur); 2101 } 2102 /* Not above a RTO max */ 2103 if (thresh > rack_rto_max) { 2104 thresh = rack_rto_max; 2105 } 2106 /* Apply user supplied min TLP */ 2107 if (thresh < rack_tlp_min) { 2108 thresh = rack_tlp_min; 2109 } 2110 return (thresh); 2111 } 2112 2113 static uint32_t 2114 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 2115 { 2116 /* 2117 * We want the rack_rtt which is the 2118 * last rtt we measured. However if that 2119 * does not exist we fallback to the srtt (which 2120 * we probably will never do) and then as a last 2121 * resort we use RACK_INITIAL_RTO if no srtt is 2122 * yet set. 2123 */ 2124 if (rack->rc_rack_rtt) 2125 return(rack->rc_rack_rtt); 2126 else if (tp->t_srtt == 0) 2127 return(RACK_INITIAL_RTO); 2128 return (TICKS_2_MSEC(tp->t_srtt >> TCP_RTT_SHIFT)); 2129 } 2130 2131 static struct rack_sendmap * 2132 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 2133 { 2134 /* 2135 * Check to see that we don't need to fall into recovery. We will 2136 * need to do so if our oldest transmit is past the time we should 2137 * have had an ack. 2138 */ 2139 struct tcp_rack *rack; 2140 struct rack_sendmap *rsm; 2141 int32_t idx; 2142 uint32_t srtt, thresh; 2143 2144 rack = (struct tcp_rack *)tp->t_fb_ptr; 2145 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 2146 return (NULL); 2147 } 2148 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2149 if (rsm == NULL) 2150 return (NULL); 2151 2152 if (rsm->r_flags & RACK_ACKED) { 2153 rsm = rack_find_lowest_rsm(rack); 2154 if (rsm == NULL) 2155 return (NULL); 2156 } 2157 idx = rsm->r_rtr_cnt - 1; 2158 srtt = rack_grab_rtt(tp, rack); 2159 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 2160 if (tsused < rsm->r_tim_lastsent[idx]) { 2161 return (NULL); 2162 } 2163 if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) { 2164 return (NULL); 2165 } 2166 /* Ok if we reach here we are over-due */ 2167 rack->r_ctl.rc_rsm_start = rsm->r_start; 2168 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 2169 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 2170 rack_cong_signal(tp, NULL, CC_NDUPACK); 2171 return (rsm); 2172 } 2173 2174 static uint32_t 2175 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 2176 { 2177 int32_t t; 2178 int32_t tt; 2179 uint32_t ret_val; 2180 2181 t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT)); 2182 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 2183 rack_persist_min, rack_persist_max); 2184 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 2185 tp->t_rxtshift++; 2186 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 2187 ret_val = (uint32_t)tt; 2188 return (ret_val); 2189 } 2190 2191 static uint32_t 2192 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 2193 { 2194 /* 2195 * Start the FR timer, we do this based on getting the first one in 2196 * the rc_tmap. Note that if its NULL we must stop the timer. in all 2197 * events we need to stop the running timer (if its running) before 2198 * starting the new one. 
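 *
 * Roughly, the selection made below is (a sketch, omitting the
 * special cases such as a lone FIN or a classified attacker):
 *
 *	nothing outstanding, no SACK, or not established -> RXT timer
 *	oldest un-acked rsm marked RACK_SACK_PASSED      -> RACK timer
 *	otherwise                                        -> TLP timer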
2199 */ 2200 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 2201 uint32_t srtt_cur; 2202 int32_t idx; 2203 int32_t is_tlp_timer = 0; 2204 struct rack_sendmap *rsm; 2205 2206 if (rack->t_timers_stopped) { 2207 /* All timers have been stopped none are to run */ 2208 return (0); 2209 } 2210 if (rack->rc_in_persist) { 2211 /* We can't start any timer in persists */ 2212 return (rack_get_persists_timer_val(tp, rack)); 2213 } 2214 if ((tp->t_state < TCPS_ESTABLISHED) || 2215 ((tp->t_flags & TF_SACK_PERMIT) == 0)) 2216 goto activate_rxt; 2217 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2218 if ((rsm == NULL) || sup_rack) { 2219 /* Nothing on the send map */ 2220 activate_rxt: 2221 time_since_sent = 0; 2222 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2223 if (rsm) { 2224 idx = rsm->r_rtr_cnt - 1; 2225 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time)) 2226 tstmp_touse = rsm->r_tim_lastsent[idx]; 2227 else 2228 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time; 2229 if (TSTMP_GT(tstmp_touse, cts)) 2230 time_since_sent = cts - tstmp_touse; 2231 } 2232 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 2233 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 2234 to = TICKS_2_MSEC(tp->t_rxtcur); 2235 if (to > time_since_sent) 2236 to -= time_since_sent; 2237 else 2238 to = rack->r_ctl.rc_min_to; 2239 if (to == 0) 2240 to = 1; 2241 return (to); 2242 } 2243 return (0); 2244 } 2245 if (rsm->r_flags & RACK_ACKED) { 2246 rsm = rack_find_lowest_rsm(rack); 2247 if (rsm == NULL) { 2248 /* No lowest? */ 2249 goto activate_rxt; 2250 } 2251 } 2252 if (rack->sack_attack_disable) { 2253 /* 2254 * We don't want to do 2255 * any TLP's if you are an attacker. 2256 * Though if you are doing what 2257 * is expected you may still have 2258 * SACK-PASSED marks. 2259 */ 2260 goto activate_rxt; 2261 } 2262 /* Convert from ms to usecs */ 2263 if (rsm->r_flags & RACK_SACK_PASSED) { 2264 if ((tp->t_flags & TF_SENTFIN) && 2265 ((tp->snd_max - tp->snd_una) == 1) && 2266 (rsm->r_flags & RACK_HAS_FIN)) { 2267 /* 2268 * We don't start a rack timer if all we have is a 2269 * FIN outstanding. 2270 */ 2271 goto activate_rxt; 2272 } 2273 if ((rack->use_rack_cheat == 0) && 2274 (IN_RECOVERY(tp->t_flags)) && 2275 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 2276 /* 2277 * We are not cheating, in recovery and 2278 * not enough ack's to yet get our next 2279 * retransmission out. 2280 * 2281 * Note that classified attackers do not 2282 * get to use the rack-cheat. 2283 */ 2284 goto activate_tlp; 2285 } 2286 srtt = rack_grab_rtt(tp, rack); 2287 thresh = rack_calc_thresh_rack(rack, srtt, cts); 2288 idx = rsm->r_rtr_cnt - 1; 2289 exp = rsm->r_tim_lastsent[idx] + thresh; 2290 if (SEQ_GEQ(exp, cts)) { 2291 to = exp - cts; 2292 if (to < rack->r_ctl.rc_min_to) { 2293 to = rack->r_ctl.rc_min_to; 2294 } 2295 } else { 2296 to = rack->r_ctl.rc_min_to; 2297 } 2298 } else { 2299 /* Ok we need to do a TLP not RACK */ 2300 activate_tlp: 2301 if ((rack->rc_tlp_in_progress != 0) || 2302 (rack->r_ctl.rc_tlp_rtx_out != 0)) { 2303 /* 2304 * The previous send was a TLP or a tlp_rtx is in 2305 * process. 2306 */ 2307 goto activate_rxt; 2308 } 2309 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 2310 if (rsm == NULL) { 2311 /* We found no rsm to TLP with. 
*/ 2312 goto activate_rxt; 2313 } 2314 if (rsm->r_flags & RACK_HAS_FIN) { 2315 /* If its a FIN we dont do TLP */ 2316 rsm = NULL; 2317 goto activate_rxt; 2318 } 2319 idx = rsm->r_rtr_cnt - 1; 2320 time_since_sent = 0; 2321 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time)) 2322 tstmp_touse = rsm->r_tim_lastsent[idx]; 2323 else 2324 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time; 2325 if (TSTMP_GT(tstmp_touse, cts)) 2326 time_since_sent = cts - tstmp_touse; 2327 is_tlp_timer = 1; 2328 if (tp->t_srtt) { 2329 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT); 2330 srtt = TICKS_2_MSEC(srtt_cur); 2331 } else 2332 srtt = RACK_INITIAL_RTO; 2333 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 2334 if (thresh > time_since_sent) 2335 to = thresh - time_since_sent; 2336 else 2337 to = rack->r_ctl.rc_min_to; 2338 if (to > TCPTV_REXMTMAX) { 2339 /* 2340 * If the TLP time works out to larger than the max 2341 * RTO lets not do TLP.. just RTO. 2342 */ 2343 goto activate_rxt; 2344 } 2345 if (rsm->r_start != rack->r_ctl.rc_last_tlp_seq) { 2346 /* 2347 * The tail is no longer the last one I did a probe 2348 * on 2349 */ 2350 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 2351 rack->r_ctl.rc_last_tlp_seq = rsm->r_start; 2352 } 2353 } 2354 if (is_tlp_timer == 0) { 2355 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 2356 } else { 2357 if ((rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) || 2358 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) { 2359 /* 2360 * We have exceeded how many times we can retran the 2361 * current TLP timer, switch to the RTO timer. 2362 */ 2363 goto activate_rxt; 2364 } else { 2365 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 2366 } 2367 } 2368 if (to == 0) 2369 to = 1; 2370 return (to); 2371 } 2372 2373 static void 2374 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2375 { 2376 if (rack->rc_in_persist == 0) { 2377 rack->r_ctl.rc_went_idle_time = cts; 2378 rack_timer_cancel(tp, rack, cts, __LINE__); 2379 tp->t_rxtshift = 0; 2380 rack->rc_in_persist = 1; 2381 } 2382 } 2383 2384 static void 2385 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack) 2386 { 2387 if (rack->rc_inp->inp_in_hpts) { 2388 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 2389 rack->r_ctl.rc_hpts_flags = 0; 2390 } 2391 rack->rc_in_persist = 0; 2392 rack->r_ctl.rc_went_idle_time = 0; 2393 tp->t_flags &= ~TF_FORCEDATA; 2394 tp->t_rxtshift = 0; 2395 } 2396 2397 static void 2398 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 2399 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 2400 { 2401 struct inpcb *inp; 2402 uint32_t delayed_ack = 0; 2403 uint32_t hpts_timeout; 2404 uint8_t stopped; 2405 uint32_t left = 0; 2406 2407 inp = tp->t_inpcb; 2408 if (inp->inp_in_hpts) { 2409 /* A previous call is already set up */ 2410 return; 2411 } 2412 if ((tp->t_state == TCPS_CLOSED) || 2413 (tp->t_state == TCPS_LISTEN)) { 2414 return; 2415 } 2416 stopped = rack->rc_tmr_stopped; 2417 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 2418 left = rack->r_ctl.rc_timer_exp - cts; 2419 } 2420 rack->tlp_timer_up = 0; 2421 rack->r_ctl.rc_timer_exp = 0; 2422 if (rack->rc_inp->inp_in_hpts == 0) { 2423 rack->r_ctl.rc_hpts_flags = 0; 2424 } 2425 if (slot) { 2426 /* We are hptsi too */ 2427 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 2428 } else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 2429 /* 2430 * We are still left on the hpts when the to goes 2431 * it will be for output. 
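 *
 * i.e. re-derive the pacing slot from the previously scheduled
 * output time (a sketch of the lines that follow):
 *
 *	slot = TSTMP_GT(rc_last_output_to, cts) ?
 *	    (rc_last_output_to - cts) : 1;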
2432 */ 2433 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) 2434 slot = rack->r_ctl.rc_last_output_to - cts; 2435 else 2436 slot = 1; 2437 } 2438 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 2439 if (rack->sack_attack_disable && 2440 (slot < USEC_TO_MSEC(tcp_sad_pacing_interval))) { 2441 /* 2442 * We have a potential attacker on 2443 * the line. We have possibly some 2444 * (or now) pacing time set. We want to 2445 * slow down the processing of sacks by some 2446 * amount (if it is an attacker). Set the default 2447 * slot for attackers in place (unless the orginal 2448 * interval is longer). Its stored in 2449 * micro-seconds, so lets convert to msecs. 2450 */ 2451 slot = USEC_TO_MSEC(tcp_sad_pacing_interval); 2452 } 2453 if (tp->t_flags & TF_DELACK) { 2454 delayed_ack = TICKS_2_MSEC(tcp_delacktime); 2455 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 2456 } 2457 if (delayed_ack && ((hpts_timeout == 0) || 2458 (delayed_ack < hpts_timeout))) 2459 hpts_timeout = delayed_ack; 2460 else 2461 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 2462 /* 2463 * If no timers are going to run and we will fall off the hptsi 2464 * wheel, we resort to a keep-alive timer if its configured. 2465 */ 2466 if ((hpts_timeout == 0) && 2467 (slot == 0)) { 2468 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 2469 (tp->t_state <= TCPS_CLOSING)) { 2470 /* 2471 * Ok we have no timer (persists, rack, tlp, rxt or 2472 * del-ack), we don't have segments being paced. So 2473 * all that is left is the keepalive timer. 2474 */ 2475 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 2476 /* Get the established keep-alive time */ 2477 hpts_timeout = TP_KEEPIDLE(tp); 2478 } else { 2479 /* Get the initial setup keep-alive time */ 2480 hpts_timeout = TP_KEEPINIT(tp); 2481 } 2482 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 2483 } 2484 } 2485 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 2486 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 2487 /* 2488 * RACK, TLP, persists and RXT timers all are restartable 2489 * based on actions input .. i.e we received a packet (ack 2490 * or sack) and that changes things (rw, or snd_una etc). 2491 * Thus we can restart them with a new value. For 2492 * keep-alive, delayed_ack we keep track of what was left 2493 * and restart the timer with a smaller value. 2494 */ 2495 if (left < hpts_timeout) 2496 hpts_timeout = left; 2497 } 2498 if (hpts_timeout) { 2499 /* 2500 * Hack alert for now we can't time-out over 2,147,483 2501 * seconds (a bit more than 596 hours), which is probably ok 2502 * :). 2503 */ 2504 if (hpts_timeout > 0x7ffffffe) 2505 hpts_timeout = 0x7ffffffe; 2506 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 2507 } 2508 if (slot) { 2509 rack->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 2510 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) 2511 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 2512 else 2513 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 2514 rack->r_ctl.rc_last_output_to = cts + slot; 2515 if ((hpts_timeout == 0) || (hpts_timeout > slot)) { 2516 if (rack->rc_inp->inp_in_hpts == 0) 2517 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(slot)); 2518 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 2519 } else { 2520 /* 2521 * Arrange for the hpts to kick back in after the 2522 * t-o if the t-o does not cause a send. 
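 *
 * Put differently (a sketch, assuming both a pacing slot and a
 * timer are pending): the pcb ends up on the wheel for whichever
 * expires first, roughly
 *
 *	tcp_hpts_insert(inp, HPTS_MS_TO_SLOTS(min(slot, hpts_timeout)));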
2523 */ 2524 if (rack->rc_inp->inp_in_hpts == 0) 2525 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout)); 2526 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 2527 } 2528 } else if (hpts_timeout) { 2529 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) { 2530 /* For a rack timer, don't wake us */ 2531 rack->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 2532 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 2533 } else { 2534 /* All other timers wake us up */ 2535 rack->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 2536 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 2537 } 2538 if (rack->rc_inp->inp_in_hpts == 0) 2539 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout)); 2540 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 2541 } else { 2542 /* No timer starting */ 2543 #ifdef INVARIANTS 2544 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 2545 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 2546 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 2547 } 2548 #endif 2549 } 2550 rack->rc_tmr_stopped = 0; 2551 if (slot) 2552 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, cts); 2553 } 2554 2555 /* 2556 * RACK Timer, here we simply do logging and house keeping. 2557 * the normal rack_output() function will call the 2558 * appropriate thing to check if we need to do a RACK retransmit. 2559 * We return 1, saying don't proceed with rack_output only 2560 * when all timers have been stopped (destroyed PCB?). 2561 */ 2562 static int 2563 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2564 { 2565 /* 2566 * This timer simply provides an internal trigger to send out data. 2567 * The check_recovery_mode call will see if there are needed 2568 * retransmissions, if so we will enter fast-recovery. The output 2569 * call may or may not do the same thing depending on sysctl 2570 * settings. 2571 */ 2572 struct rack_sendmap *rsm; 2573 int32_t recovery, ll; 2574 2575 if (tp->t_timers->tt_flags & TT_STOPPED) { 2576 return (1); 2577 } 2578 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 2579 /* Its not time yet */ 2580 return (0); 2581 } 2582 recovery = IN_RECOVERY(tp->t_flags); 2583 counter_u64_add(rack_to_tot, 1); 2584 if (rack->r_state && (rack->r_state != tp->t_state)) 2585 rack_set_state(tp, rack); 2586 rsm = rack_check_recovery_mode(tp, cts); 2587 if (rsm) 2588 ll = rsm->r_end - rsm->r_start; 2589 else 2590 ll = 0; 2591 rack_log_to_event(rack, RACK_TO_FRM_RACK, ll); 2592 if (rsm) { 2593 uint32_t rtt; 2594 2595 rtt = rack->rc_rack_rtt; 2596 if (rtt == 0) 2597 rtt = 1; 2598 if ((recovery == 0) && 2599 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 2600 /* 2601 * The rack-timeout that enter's us into recovery 2602 * will force out one MSS and set us up so that we 2603 * can do one more send in 2*rtt (transitioning the 2604 * rack timeout into a rack-tlp). 2605 */ 2606 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 2607 rack_log_to_prr(rack, 3); 2608 } else if ((rack->r_ctl.rc_prr_sndcnt < (rsm->r_end - rsm->r_start)) && 2609 rack->use_rack_cheat) { 2610 /* 2611 * When a rack timer goes, if the rack cheat is 2612 * on, arrange it so we can send a full segment. 
2613 */ 2614 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 2615 rack_log_to_prr(rack, 4); 2616 } 2617 } else { 2618 /* This is a case that should happen rarely if ever */ 2619 counter_u64_add(rack_tlp_does_nada, 1); 2620 #ifdef TCP_BLACKBOX 2621 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2622 #endif 2623 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2624 } 2625 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 2626 return (0); 2627 } 2628 2629 static __inline void 2630 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 2631 struct rack_sendmap *rsm, uint32_t start) 2632 { 2633 int idx; 2634 2635 nrsm->r_start = start; 2636 nrsm->r_end = rsm->r_end; 2637 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 2638 nrsm->r_flags = rsm->r_flags; 2639 nrsm->r_dupack = rsm->r_dupack; 2640 nrsm->r_rtr_bytes = 0; 2641 rsm->r_end = nrsm->r_start; 2642 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 2643 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 2644 } 2645 } 2646 2647 static struct rack_sendmap * 2648 rack_merge_rsm(struct tcp_rack *rack, 2649 struct rack_sendmap *l_rsm, 2650 struct rack_sendmap *r_rsm) 2651 { 2652 /* 2653 * We are merging two ack'd RSM's, 2654 * the l_rsm is on the left (lower seq 2655 * values) and the r_rsm is on the right 2656 * (higher seq value). The simplest way 2657 * to merge these is to move the right 2658 * one into the left. I don't think there 2659 * is any reason we need to try to find 2660 * the oldest (or last oldest retransmitted). 2661 */ 2662 struct rack_sendmap *rm; 2663 2664 l_rsm->r_end = r_rsm->r_end; 2665 if (l_rsm->r_dupack < r_rsm->r_dupack) 2666 l_rsm->r_dupack = r_rsm->r_dupack; 2667 if (r_rsm->r_rtr_bytes) 2668 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 2669 if (r_rsm->r_in_tmap) { 2670 /* This really should not happen */ 2671 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 2672 r_rsm->r_in_tmap = 0; 2673 } 2674 /* Now the flags */ 2675 if (r_rsm->r_flags & RACK_HAS_FIN) 2676 l_rsm->r_flags |= RACK_HAS_FIN; 2677 if (r_rsm->r_flags & RACK_TLP) 2678 l_rsm->r_flags |= RACK_TLP; 2679 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 2680 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 2681 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 2682 #ifdef INVARIANTS 2683 if (rm != r_rsm) { 2684 panic("removing head in rack:%p rsm:%p rm:%p", 2685 rack, r_rsm, rm); 2686 } 2687 #endif 2688 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 2689 /* Transfer the split limit to the map we free */ 2690 r_rsm->r_limit_type = l_rsm->r_limit_type; 2691 l_rsm->r_limit_type = 0; 2692 } 2693 rack_free(rack, r_rsm); 2694 return(l_rsm); 2695 } 2696 2697 /* 2698 * TLP Timer, here we simply setup what segment we want to 2699 * have the TLP expire on, the normal rack_output() will then 2700 * send it out. 2701 * 2702 * We return 1, saying don't proceed with rack_output only 2703 * when all timers have been stopped (destroyed PCB?). 2704 */ 2705 static int 2706 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2707 { 2708 /* 2709 * Tail Loss Probe. 
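 *
 * In outline (a sketch of the code below): when new data is
 * available and fits in the offered window we arrange to send
 * about one segment of it, e.g.
 *
 *	amm = min(avail - out, ctf_fixed_maxseg(tp));
 *	rack->r_ctl.rc_tlp_new_data = amm;  (rc_prr_sndcnt in recovery)
 *
 * otherwise the highest un-acked rsm is picked (split down to one
 * segment if needed) and hung on rc_tlpsend for rack_output() to
 * retransmit.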
2710 */ 2711 struct rack_sendmap *rsm = NULL; 2712 struct rack_sendmap *insret; 2713 struct socket *so; 2714 uint32_t amm, old_prr_snd = 0; 2715 uint32_t out, avail; 2716 int collapsed_win = 0; 2717 2718 if (tp->t_timers->tt_flags & TT_STOPPED) { 2719 return (1); 2720 } 2721 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 2722 /* Its not time yet */ 2723 return (0); 2724 } 2725 if (rack_progress_timeout_check(tp)) { 2726 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 2727 return (1); 2728 } 2729 /* 2730 * A TLP timer has expired. We have been idle for 2 rtts. So we now 2731 * need to figure out how to force a full MSS segment out. 2732 */ 2733 rack_log_to_event(rack, RACK_TO_FRM_TLP, 0); 2734 counter_u64_add(rack_tlp_tot, 1); 2735 if (rack->r_state && (rack->r_state != tp->t_state)) 2736 rack_set_state(tp, rack); 2737 so = tp->t_inpcb->inp_socket; 2738 #ifdef KERN_TLS 2739 if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) { 2740 /* 2741 * For hardware TLS we do *not* want to send 2742 * new data, lets instead just do a retransmission. 2743 */ 2744 goto need_retran; 2745 } 2746 #endif 2747 avail = sbavail(&so->so_snd); 2748 out = tp->snd_max - tp->snd_una; 2749 rack->tlp_timer_up = 1; 2750 if (out > tp->snd_wnd) { 2751 /* special case, we need a retransmission */ 2752 collapsed_win = 1; 2753 goto need_retran; 2754 } 2755 /* 2756 * If we are in recovery we can jazz out a segment if new data is 2757 * present simply by setting rc_prr_sndcnt to a segment. 2758 */ 2759 if ((avail > out) && 2760 ((rack_always_send_oldest == 0) || (TAILQ_EMPTY(&rack->r_ctl.rc_tmap)))) { 2761 /* New data is available */ 2762 amm = avail - out; 2763 if (amm > ctf_fixed_maxseg(tp)) { 2764 amm = ctf_fixed_maxseg(tp); 2765 } else if ((amm < ctf_fixed_maxseg(tp)) && ((tp->t_flags & TF_NODELAY) == 0)) { 2766 /* not enough to fill a MTU and no-delay is off */ 2767 goto need_retran; 2768 } 2769 if (IN_RECOVERY(tp->t_flags)) { 2770 /* Unlikely */ 2771 old_prr_snd = rack->r_ctl.rc_prr_sndcnt; 2772 if (out + amm <= tp->snd_wnd) { 2773 rack->r_ctl.rc_prr_sndcnt = amm; 2774 rack_log_to_prr(rack, 4); 2775 } else 2776 goto need_retran; 2777 } else { 2778 /* Set the send-new override */ 2779 if (out + amm <= tp->snd_wnd) 2780 rack->r_ctl.rc_tlp_new_data = amm; 2781 else 2782 goto need_retran; 2783 } 2784 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 2785 rack->r_ctl.rc_last_tlp_seq = tp->snd_max; 2786 rack->r_ctl.rc_tlpsend = NULL; 2787 counter_u64_add(rack_tlp_newdata, 1); 2788 goto send; 2789 } 2790 need_retran: 2791 /* 2792 * Ok we need to arrange the last un-acked segment to be re-sent, or 2793 * optionally the first un-acked segment. 2794 */ 2795 if (collapsed_win == 0) { 2796 if (rack_always_send_oldest) 2797 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2798 else { 2799 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 2800 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 2801 rsm = rack_find_high_nonack(rack, rsm); 2802 } 2803 } 2804 if (rsm == NULL) { 2805 counter_u64_add(rack_tlp_does_nada, 1); 2806 #ifdef TCP_BLACKBOX 2807 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2808 #endif 2809 goto out; 2810 } 2811 } else { 2812 /* 2813 * We must find the last segment 2814 * that was acceptable by the client. 2815 */ 2816 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 2817 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 2818 /* Found one */ 2819 break; 2820 } 2821 } 2822 if (rsm == NULL) { 2823 /* None? 
if so send the first */ 2824 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 2825 if (rsm == NULL) { 2826 counter_u64_add(rack_tlp_does_nada, 1); 2827 #ifdef TCP_BLACKBOX 2828 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2829 #endif 2830 goto out; 2831 } 2832 } 2833 } 2834 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 2835 /* 2836 * We need to split this the last segment in two. 2837 */ 2838 struct rack_sendmap *nrsm; 2839 2840 2841 nrsm = rack_alloc_full_limit(rack); 2842 if (nrsm == NULL) { 2843 /* 2844 * No memory to split, we will just exit and punt 2845 * off to the RXT timer. 2846 */ 2847 counter_u64_add(rack_tlp_does_nada, 1); 2848 goto out; 2849 } 2850 rack_clone_rsm(rack, nrsm, rsm, 2851 (rsm->r_end - ctf_fixed_maxseg(tp))); 2852 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 2853 #ifdef INVARIANTS 2854 if (insret != NULL) { 2855 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 2856 nrsm, insret, rack, rsm); 2857 } 2858 #endif 2859 if (rsm->r_in_tmap) { 2860 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 2861 nrsm->r_in_tmap = 1; 2862 } 2863 rsm->r_flags &= (~RACK_HAS_FIN); 2864 rsm = nrsm; 2865 } 2866 rack->r_ctl.rc_tlpsend = rsm; 2867 rack->r_ctl.rc_tlp_rtx_out = 1; 2868 if (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) { 2869 rack->r_ctl.rc_tlp_seg_send_cnt++; 2870 tp->t_rxtshift++; 2871 } else { 2872 rack->r_ctl.rc_last_tlp_seq = rsm->r_start; 2873 rack->r_ctl.rc_tlp_seg_send_cnt = 1; 2874 } 2875 send: 2876 rack->r_ctl.rc_tlp_send_cnt++; 2877 if (rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) { 2878 /* 2879 * Can't [re]/transmit a segment we have not heard from the 2880 * peer in max times. We need the retransmit timer to take 2881 * over. 2882 */ 2883 restore: 2884 rack->r_ctl.rc_tlpsend = NULL; 2885 if (rsm) 2886 rsm->r_flags &= ~RACK_TLP; 2887 rack->r_ctl.rc_prr_sndcnt = old_prr_snd; 2888 rack_log_to_prr(rack, 5); 2889 counter_u64_add(rack_tlp_retran_fail, 1); 2890 goto out; 2891 } else if (rsm) { 2892 rsm->r_flags |= RACK_TLP; 2893 } 2894 if (rsm && (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) && 2895 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) { 2896 /* 2897 * We don't want to send a single segment more than the max 2898 * either. 2899 */ 2900 goto restore; 2901 } 2902 rack->r_timer_override = 1; 2903 rack->r_tlp_running = 1; 2904 rack->rc_tlp_in_progress = 1; 2905 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 2906 return (0); 2907 out: 2908 rack->tlp_timer_up = 0; 2909 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 2910 return (0); 2911 } 2912 2913 /* 2914 * Delayed ack Timer, here we simply need to setup the 2915 * ACK_NOW flag and remove the DELACK flag. From there 2916 * the output routine will send the ack out. 2917 * 2918 * We only return 1, saying don't proceed, if all timers 2919 * are stopped (destroyed PCB?). 2920 */ 2921 static int 2922 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2923 { 2924 if (tp->t_timers->tt_flags & TT_STOPPED) { 2925 return (1); 2926 } 2927 rack_log_to_event(rack, RACK_TO_FRM_DELACK, 0); 2928 tp->t_flags &= ~TF_DELACK; 2929 tp->t_flags |= TF_ACKNOW; 2930 TCPSTAT_INC(tcps_delack); 2931 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 2932 return (0); 2933 } 2934 2935 /* 2936 * Persists timer, here we simply need to setup the 2937 * FORCE-DATA flag the output routine will send 2938 * the one byte send. 2939 * 2940 * We only return 1, saying don't proceed, if all timers 2941 * are stopped (destroyed PCB?). 
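 *
 * A sketch of the probe itself (see the body below): the zero
 * window probe is a bare segment built with tcpip_maketemplate()
 * and sent with an already-acked sequence number,
 *
 *	tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t,
 *	    NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * after which t_rxtshift is bumped for backoff and the connection
 * is dropped once the probes have gone unanswered for too long.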
2942 */ 2943 static int 2944 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2945 { 2946 struct tcptemp *t_template; 2947 struct inpcb *inp; 2948 int32_t retval = 1; 2949 2950 inp = tp->t_inpcb; 2951 2952 if (tp->t_timers->tt_flags & TT_STOPPED) { 2953 return (1); 2954 } 2955 if (rack->rc_in_persist == 0) 2956 return (0); 2957 if (rack_progress_timeout_check(tp)) { 2958 tcp_set_inp_to_drop(inp, ETIMEDOUT); 2959 return (1); 2960 } 2961 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 2962 /* 2963 * Persistence timer into zero window. Force a byte to be output, if 2964 * possible. 2965 */ 2966 TCPSTAT_INC(tcps_persisttimeo); 2967 /* 2968 * Hack: if the peer is dead/unreachable, we do not time out if the 2969 * window is closed. After a full backoff, drop the connection if 2970 * the idle time (no responses to probes) reaches the maximum 2971 * backoff that we would use if retransmitting. 2972 */ 2973 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 2974 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 2975 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { 2976 TCPSTAT_INC(tcps_persistdrop); 2977 retval = 1; 2978 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 2979 goto out; 2980 } 2981 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 2982 tp->snd_una == tp->snd_max) 2983 rack_exit_persist(tp, rack); 2984 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 2985 /* 2986 * If the user has closed the socket then drop a persisting 2987 * connection after a much reduced timeout. 2988 */ 2989 if (tp->t_state > TCPS_CLOSE_WAIT && 2990 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 2991 retval = 1; 2992 TCPSTAT_INC(tcps_persistdrop); 2993 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 2994 goto out; 2995 } 2996 t_template = tcpip_maketemplate(rack->rc_inp); 2997 if (t_template) { 2998 tcp_respond(tp, t_template->tt_ipgen, 2999 &t_template->tt_t, (struct mbuf *)NULL, 3000 tp->rcv_nxt, tp->snd_una - 1, 0); 3001 /* This sends an ack */ 3002 if (tp->t_flags & TF_DELACK) 3003 tp->t_flags &= ~TF_DELACK; 3004 free(t_template, M_TEMP); 3005 } 3006 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 3007 tp->t_rxtshift++; 3008 out: 3009 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, 0); 3010 rack_start_hpts_timer(rack, tp, cts, 3011 0, 0, 0); 3012 return (retval); 3013 } 3014 3015 /* 3016 * If a keepalive goes off, we had no other timers 3017 * happening. We always return 1 here since this 3018 * routine either drops the connection or sends 3019 * out a segment with respond. 3020 */ 3021 static int 3022 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 3023 { 3024 struct tcptemp *t_template; 3025 struct inpcb *inp; 3026 3027 if (tp->t_timers->tt_flags & TT_STOPPED) { 3028 return (1); 3029 } 3030 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 3031 inp = tp->t_inpcb; 3032 rack_log_to_event(rack, RACK_TO_FRM_KEEP, 0); 3033 /* 3034 * Keep-alive timer went off; send something or drop connection if 3035 * idle for too long. 3036 */ 3037 TCPSTAT_INC(tcps_keeptimeo); 3038 if (tp->t_state < TCPS_ESTABLISHED) 3039 goto dropit; 3040 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 3041 tp->t_state <= TCPS_CLOSING) { 3042 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 3043 goto dropit; 3044 /* 3045 * Send a packet designed to force a response if the peer is 3046 * up and reachable: either an ACK if the connection is 3047 * still alive, or an RST if the peer has closed the 3048 * connection due to timeout or reboot. 
Using sequence 3049 * number tp->snd_una-1 causes the transmitted zero-length 3050 * segment to lie outside the receive window; by the 3051 * protocol spec, this requires the correspondent TCP to 3052 * respond. 3053 */ 3054 TCPSTAT_INC(tcps_keepprobe); 3055 t_template = tcpip_maketemplate(inp); 3056 if (t_template) { 3057 tcp_respond(tp, t_template->tt_ipgen, 3058 &t_template->tt_t, (struct mbuf *)NULL, 3059 tp->rcv_nxt, tp->snd_una - 1, 0); 3060 free(t_template, M_TEMP); 3061 } 3062 } 3063 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 3064 return (1); 3065 dropit: 3066 TCPSTAT_INC(tcps_keepdrops); 3067 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 3068 return (1); 3069 } 3070 3071 /* 3072 * Retransmit helper function, clear up all the ack 3073 * flags and take care of important book keeping. 3074 */ 3075 static void 3076 rack_remxt_tmr(struct tcpcb *tp) 3077 { 3078 /* 3079 * The retransmit timer went off, all sack'd blocks must be 3080 * un-acked. 3081 */ 3082 struct rack_sendmap *rsm, *trsm = NULL; 3083 struct tcp_rack *rack; 3084 int32_t cnt = 0; 3085 3086 rack = (struct tcp_rack *)tp->t_fb_ptr; 3087 rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__); 3088 rack_log_to_event(rack, RACK_TO_FRM_TMR, 0); 3089 if (rack->r_state && (rack->r_state != tp->t_state)) 3090 rack_set_state(tp, rack); 3091 /* 3092 * Ideally we would like to be able to 3093 * mark SACK-PASS on anything not acked here. 3094 * However, if we do that we would burst out 3095 * all that data 1ms apart. This would be unwise, 3096 * so for now we will just let the normal rxt timer 3097 * and tlp timer take care of it. 3098 */ 3099 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 3100 if (rsm->r_flags & RACK_ACKED) { 3101 cnt++; 3102 rsm->r_dupack = 0; 3103 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 3104 if (rsm->r_in_tmap == 0) { 3105 /* We must re-add it back to the tlist */ 3106 if (trsm == NULL) { 3107 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3108 } else { 3109 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 3110 } 3111 rsm->r_in_tmap = 1; 3112 } 3113 } 3114 trsm = rsm; 3115 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 3116 } 3117 /* Clear the count (we just un-acked them) */ 3118 rack->r_ctl.rc_sacked = 0; 3119 /* Clear the tlp rtx mark */ 3120 rack->r_ctl.rc_tlp_rtx_out = 0; 3121 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 3122 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3123 rack->r_ctl.rc_prr_sndcnt = 0; 3124 rack_log_to_prr(rack, 6); 3125 rack->r_timer_override = 1; 3126 } 3127 3128 /* 3129 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 3130 * we will setup to retransmit the lowest seq number outstanding. 3131 */ 3132 static int 3133 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 3134 { 3135 int32_t rexmt; 3136 struct inpcb *inp; 3137 int32_t retval = 0; 3138 3139 inp = tp->t_inpcb; 3140 if (tp->t_timers->tt_flags & TT_STOPPED) { 3141 return (1); 3142 } 3143 if (rack_progress_timeout_check(tp)) { 3144 tcp_set_inp_to_drop(inp, ETIMEDOUT); 3145 return (1); 3146 } 3147 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 3148 if (TCPS_HAVEESTABLISHED(tp->t_state) && 3149 (tp->snd_una == tp->snd_max)) { 3150 /* Nothing outstanding .. nothing to do */ 3151 return (0); 3152 } 3153 /* 3154 * Retransmission timer went off. Message has not been acked within 3155 * retransmit interval. Back off to a longer retransmit interval 3156 * and retransmit one segment. 
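 *
 * The backoff below follows the usual exponential pattern (a
 * sketch; the SYN states use RACK_INITIAL_RTO as the base
 * instead):
 *
 *	rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
 *	TCPT_RANGESET(tp->t_rxtcur, rexmt,
 *	    max(MSEC_2_TICKS(rack_rto_min), rexmt),
 *	    MSEC_2_TICKS(rack_rto_max));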
3157 */ 3158 rack_remxt_tmr(tp); 3159 if ((rack->r_ctl.rc_resend == NULL) || 3160 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 3161 /* 3162 * If the rwnd collapsed on 3163 * the one we are retransmitting 3164 * it does not count against the 3165 * rxt count. 3166 */ 3167 tp->t_rxtshift++; 3168 } 3169 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 3170 tp->t_rxtshift = TCP_MAXRXTSHIFT; 3171 TCPSTAT_INC(tcps_timeoutdrop); 3172 retval = 1; 3173 tcp_set_inp_to_drop(rack->rc_inp, 3174 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 3175 goto out; 3176 } 3177 if (tp->t_state == TCPS_SYN_SENT) { 3178 /* 3179 * If the SYN was retransmitted, indicate CWND to be limited 3180 * to 1 segment in cc_conn_init(). 3181 */ 3182 tp->snd_cwnd = 1; 3183 } else if (tp->t_rxtshift == 1) { 3184 /* 3185 * first retransmit; record ssthresh and cwnd so they can be 3186 * recovered if this turns out to be a "bad" retransmit. A 3187 * retransmit is considered "bad" if an ACK for this segment 3188 * is received within RTT/2 interval; the assumption here is 3189 * that the ACK was already in flight. See "On Estimating 3190 * End-to-End Network Path Properties" by Allman and Paxson 3191 * for more details. 3192 */ 3193 tp->snd_cwnd_prev = tp->snd_cwnd; 3194 tp->snd_ssthresh_prev = tp->snd_ssthresh; 3195 tp->snd_recover_prev = tp->snd_recover; 3196 if (IN_FASTRECOVERY(tp->t_flags)) 3197 tp->t_flags |= TF_WASFRECOVERY; 3198 else 3199 tp->t_flags &= ~TF_WASFRECOVERY; 3200 if (IN_CONGRECOVERY(tp->t_flags)) 3201 tp->t_flags |= TF_WASCRECOVERY; 3202 else 3203 tp->t_flags &= ~TF_WASCRECOVERY; 3204 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); 3205 tp->t_flags |= TF_PREVVALID; 3206 } else 3207 tp->t_flags &= ~TF_PREVVALID; 3208 TCPSTAT_INC(tcps_rexmttimeo); 3209 if ((tp->t_state == TCPS_SYN_SENT) || 3210 (tp->t_state == TCPS_SYN_RECEIVED)) 3211 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]); 3212 else 3213 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; 3214 TCPT_RANGESET(tp->t_rxtcur, rexmt, 3215 max(MSEC_2_TICKS(rack_rto_min), rexmt), 3216 MSEC_2_TICKS(rack_rto_max)); 3217 /* 3218 * We enter the path for PLMTUD if connection is established or, if 3219 * connection is FIN_WAIT_1 status, reason for the last is that if 3220 * amount of data we send is very small, we could send it in couple 3221 * of packets and process straight to FIN. In that case we won't 3222 * catch ESTABLISHED state. 3223 */ 3224 if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED)) 3225 || (tp->t_state == TCPS_FIN_WAIT_1))) { 3226 #ifdef INET6 3227 int32_t isipv6; 3228 #endif 3229 3230 /* 3231 * Idea here is that at each stage of mtu probe (usually, 3232 * 1448 -> 1188 -> 524) should be given 2 chances to recover 3233 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 3234 * should take care of that. 3235 */ 3236 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 3237 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 3238 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 3239 tp->t_rxtshift % 2 == 0)) { 3240 /* 3241 * Enter Path MTU Black-hole Detection mechanism: - 3242 * Disable Path MTU Discovery (IP "DF" bit). - 3243 * Reduce MTU to lower value than what we negotiated 3244 * with peer. 3245 */ 3246 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 3247 /* Record that we may have found a black hole. */ 3248 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 3249 /* Keep track of previous MSS. 
*/ 3250 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 3251 } 3252 3253 /* 3254 * Reduce the MSS to blackhole value or to the 3255 * default in an attempt to retransmit. 3256 */ 3257 #ifdef INET6 3258 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0; 3259 if (isipv6 && 3260 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 3261 /* Use the sysctl tuneable blackhole MSS. */ 3262 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 3263 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 3264 } else if (isipv6) { 3265 /* Use the default MSS. */ 3266 tp->t_maxseg = V_tcp_v6mssdflt; 3267 /* 3268 * Disable Path MTU Discovery when we switch 3269 * to minmss. 3270 */ 3271 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 3272 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 3273 } 3274 #endif 3275 #if defined(INET6) && defined(INET) 3276 else 3277 #endif 3278 #ifdef INET 3279 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 3280 /* Use the sysctl tuneable blackhole MSS. */ 3281 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 3282 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 3283 } else { 3284 /* Use the default MSS. */ 3285 tp->t_maxseg = V_tcp_mssdflt; 3286 /* 3287 * Disable Path MTU Discovery when we switch 3288 * to minmss. 3289 */ 3290 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 3291 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 3292 } 3293 #endif 3294 } else { 3295 /* 3296 * If further retransmissions are still unsuccessful 3297 * with a lowered MTU, maybe this isn't a blackhole 3298 * and we restore the previous MSS and blackhole 3299 * detection flags. The limit '6' is determined by 3300 * giving each probe stage (1448, 1188, 524) 2 3301 * chances to recover. 3302 */ 3303 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 3304 (tp->t_rxtshift >= 6)) { 3305 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 3306 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 3307 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 3308 TCPSTAT_INC(tcps_pmtud_blackhole_failed); 3309 } 3310 } 3311 } 3312 /* 3313 * If we backed off this far, our srtt estimate is probably bogus. 3314 * Clobber it so we'll take the next rtt measurement as our srtt; 3315 * move the current srtt into rttvar to keep the current retransmit 3316 * times until then. 
3317 */ 3318 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 3319 #ifdef INET6 3320 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 3321 in6_losing(tp->t_inpcb); 3322 else 3323 #endif 3324 in_losing(tp->t_inpcb); 3325 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); 3326 tp->t_srtt = 0; 3327 } 3328 if (rack_use_sack_filter) 3329 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 3330 tp->snd_recover = tp->snd_max; 3331 tp->t_flags |= TF_ACKNOW; 3332 tp->t_rtttime = 0; 3333 rack_cong_signal(tp, NULL, CC_RTO); 3334 out: 3335 return (retval); 3336 } 3337 3338 static int 3339 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling) 3340 { 3341 int32_t ret = 0; 3342 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 3343 3344 if (timers == 0) { 3345 return (0); 3346 } 3347 if (tp->t_state == TCPS_LISTEN) { 3348 /* no timers on listen sockets */ 3349 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 3350 return (0); 3351 return (1); 3352 } 3353 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 3354 uint32_t left; 3355 3356 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 3357 ret = -1; 3358 rack_log_to_processing(rack, cts, ret, 0); 3359 return (0); 3360 } 3361 if (hpts_calling == 0) { 3362 ret = -2; 3363 rack_log_to_processing(rack, cts, ret, 0); 3364 return (0); 3365 } 3366 /* 3367 * Ok our timer went off early and we are not paced false 3368 * alarm, go back to sleep. 3369 */ 3370 ret = -3; 3371 left = rack->r_ctl.rc_timer_exp - cts; 3372 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 3373 rack_log_to_processing(rack, cts, ret, left); 3374 rack->rc_last_pto_set = 0; 3375 return (1); 3376 } 3377 rack->rc_tmr_stopped = 0; 3378 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 3379 if (timers & PACE_TMR_DELACK) { 3380 ret = rack_timeout_delack(tp, rack, cts); 3381 } else if (timers & PACE_TMR_RACK) { 3382 rack->r_ctl.rc_tlp_rxt_last_time = cts; 3383 ret = rack_timeout_rack(tp, rack, cts); 3384 } else if (timers & PACE_TMR_TLP) { 3385 rack->r_ctl.rc_tlp_rxt_last_time = cts; 3386 ret = rack_timeout_tlp(tp, rack, cts); 3387 } else if (timers & PACE_TMR_RXT) { 3388 rack->r_ctl.rc_tlp_rxt_last_time = cts; 3389 ret = rack_timeout_rxt(tp, rack, cts); 3390 } else if (timers & PACE_TMR_PERSIT) { 3391 ret = rack_timeout_persist(tp, rack, cts); 3392 } else if (timers & PACE_TMR_KEEP) { 3393 ret = rack_timeout_keepalive(tp, rack, cts); 3394 } 3395 rack_log_to_processing(rack, cts, ret, timers); 3396 return (ret); 3397 } 3398 3399 static void 3400 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 3401 { 3402 uint8_t hpts_removed = 0; 3403 3404 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 3405 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 3406 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 3407 hpts_removed = 1; 3408 } 3409 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 3410 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 3411 if (rack->rc_inp->inp_in_hpts && 3412 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 3413 /* 3414 * Canceling timer's when we have no output being 3415 * paced. We also must remove ourselves from the 3416 * hpts. 
3417 */ 3418 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 3419 hpts_removed = 1; 3420 } 3421 rack_log_to_cancel(rack, hpts_removed, line); 3422 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 3423 } 3424 } 3425 3426 static void 3427 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 3428 { 3429 return; 3430 } 3431 3432 static int 3433 rack_stopall(struct tcpcb *tp) 3434 { 3435 struct tcp_rack *rack; 3436 rack = (struct tcp_rack *)tp->t_fb_ptr; 3437 rack->t_timers_stopped = 1; 3438 return (0); 3439 } 3440 3441 static void 3442 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 3443 { 3444 return; 3445 } 3446 3447 static int 3448 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 3449 { 3450 return (0); 3451 } 3452 3453 static void 3454 rack_stop_all_timers(struct tcpcb *tp) 3455 { 3456 struct tcp_rack *rack; 3457 3458 /* 3459 * Assure no timers are running. 3460 */ 3461 if (tcp_timer_active(tp, TT_PERSIST)) { 3462 /* We enter in persists, set the flag appropriately */ 3463 rack = (struct tcp_rack *)tp->t_fb_ptr; 3464 rack->rc_in_persist = 1; 3465 } 3466 tcp_timer_suspend(tp, TT_PERSIST); 3467 tcp_timer_suspend(tp, TT_REXMT); 3468 tcp_timer_suspend(tp, TT_KEEP); 3469 tcp_timer_suspend(tp, TT_DELACK); 3470 } 3471 3472 static void 3473 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 3474 struct rack_sendmap *rsm, uint32_t ts) 3475 { 3476 int32_t idx; 3477 3478 rsm->r_rtr_cnt++; 3479 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 3480 rsm->r_dupack = 0; 3481 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 3482 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 3483 rsm->r_flags |= RACK_OVERMAX; 3484 } 3485 if ((rsm->r_rtr_cnt > 1) && (rack->r_tlp_running == 0)) { 3486 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 3487 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 3488 } 3489 idx = rsm->r_rtr_cnt - 1; 3490 rsm->r_tim_lastsent[idx] = ts; 3491 if (rsm->r_flags & RACK_ACKED) { 3492 /* Problably MTU discovery messing with us */ 3493 rsm->r_flags &= ~RACK_ACKED; 3494 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 3495 } 3496 if (rsm->r_in_tmap) { 3497 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3498 rsm->r_in_tmap = 0; 3499 } 3500 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3501 rsm->r_in_tmap = 1; 3502 if (rsm->r_flags & RACK_SACK_PASSED) { 3503 /* We have retransmitted due to the SACK pass */ 3504 rsm->r_flags &= ~RACK_SACK_PASSED; 3505 rsm->r_flags |= RACK_WAS_SACKPASS; 3506 } 3507 } 3508 3509 3510 static uint32_t 3511 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 3512 struct rack_sendmap *rsm, uint32_t ts, int32_t *lenp) 3513 { 3514 /* 3515 * We (re-)transmitted starting at rsm->r_start for some length 3516 * (possibly less than r_end. 3517 */ 3518 struct rack_sendmap *nrsm, *insret; 3519 uint32_t c_end; 3520 int32_t len; 3521 3522 len = *lenp; 3523 c_end = rsm->r_start + len; 3524 if (SEQ_GEQ(c_end, rsm->r_end)) { 3525 /* 3526 * We retransmitted the whole piece or more than the whole 3527 * slopping into the next rsm. 3528 */ 3529 rack_update_rsm(tp, rack, rsm, ts); 3530 if (c_end == rsm->r_end) { 3531 *lenp = 0; 3532 return (0); 3533 } else { 3534 int32_t act_len; 3535 3536 /* Hangs over the end return whats left */ 3537 act_len = rsm->r_end - rsm->r_start; 3538 *lenp = (len - act_len); 3539 return (rsm->r_end); 3540 } 3541 /* We don't get out of this block. 
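* Both paths above return; e.g. an rsm covering 1 - 11 that had 15 bytes (re)sent hands back 11 and trims *lenp to 5, so the caller can continue with the next rsm.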
*/ 3542 } 3543 /* 3544 * Here we retransmitted less than the whole thing which means we 3545 * have to split this into what was transmitted and what was not. 3546 */ 3547 nrsm = rack_alloc_full_limit(rack); 3548 if (nrsm == NULL) { 3549 /* 3550 * We can't get memory, so lets not proceed. 3551 */ 3552 *lenp = 0; 3553 return (0); 3554 } 3555 /* 3556 * So here we are going to take the original rsm and make it what we 3557 * retransmitted. nrsm will be the tail portion we did not 3558 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 3559 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 3560 * 1, 6 and the new piece will be 6, 11. 3561 */ 3562 rack_clone_rsm(rack, nrsm, rsm, c_end); 3563 nrsm->r_dupack = 0; 3564 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 3565 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 3566 #ifdef INVARIANTS 3567 if (insret != NULL) { 3568 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 3569 nrsm, insret, rack, rsm); 3570 } 3571 #endif 3572 if (rsm->r_in_tmap) { 3573 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 3574 nrsm->r_in_tmap = 1; 3575 } 3576 rsm->r_flags &= (~RACK_HAS_FIN); 3577 rack_update_rsm(tp, rack, rsm, ts); 3578 *lenp = 0; 3579 return (0); 3580 } 3581 3582 3583 static void 3584 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 3585 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts, 3586 uint8_t pass, struct rack_sendmap *hintrsm) 3587 { 3588 struct tcp_rack *rack; 3589 struct rack_sendmap *rsm, *nrsm, *insret, fe; 3590 register uint32_t snd_max, snd_una; 3591 3592 /* 3593 * Add to the RACK log of packets in flight or retransmitted. If 3594 * there is a TS option we will use the TS echoed, if not we will 3595 * grab a TS. 3596 * 3597 * Retransmissions will increment the count and move the ts to its 3598 * proper place. Note that if options do not include TS's then we 3599 * won't be able to effectively use the ACK for an RTT on a retran. 3600 * 3601 * Notes about r_start and r_end. Lets consider a send starting at 3602 * sequence 1 for 10 bytes. In such an example the r_start would be 3603 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 3604 * This means that r_end is actually the first sequence for the next 3605 * slot (11). 3606 * 3607 */ 3608 /* 3609 * If err is set what do we do XXXrrs? should we not add the thing? 3610 * -- i.e. return if err != 0 or should we pretend we sent it? -- 3611 * i.e. proceed with add ** do this for now. 3612 */ 3613 INP_WLOCK_ASSERT(tp->t_inpcb); 3614 if (err) 3615 /* 3616 * We don't log errors -- we could but snd_max does not 3617 * advance in this case either. 3618 */ 3619 return; 3620 3621 if (th_flags & TH_RST) { 3622 /* 3623 * We don't log resets and we return immediately from 3624 * sending 3625 */ 3626 return; 3627 } 3628 rack = (struct tcp_rack *)tp->t_fb_ptr; 3629 snd_una = tp->snd_una; 3630 if (SEQ_LEQ((seq_out + len), snd_una)) { 3631 /* Are sending an old segment to induce an ack (keep-alive)? */ 3632 return; 3633 } 3634 if (SEQ_LT(seq_out, snd_una)) { 3635 /* huh? should we panic? */ 3636 uint32_t end; 3637 3638 end = seq_out + len; 3639 seq_out = snd_una; 3640 if (SEQ_GEQ(end, seq_out)) 3641 len = end - seq_out; 3642 else 3643 len = 0; 3644 } 3645 snd_max = tp->snd_max; 3646 if (th_flags & (TH_SYN | TH_FIN)) { 3647 /* 3648 * The call to rack_log_output is made before bumping 3649 * snd_max. 
This means we can record one extra byte on a SYN 3650 * or FIN if seq_out is adding more on and a FIN is present 3651 * (and we are not resending). 3652 */ 3653 if (th_flags & TH_SYN) 3654 len++; 3655 if (th_flags & TH_FIN) 3656 len++; 3657 if (SEQ_LT(snd_max, tp->snd_nxt)) { 3658 /* 3659 * The add/update as not been done for the FIN/SYN 3660 * yet. 3661 */ 3662 snd_max = tp->snd_nxt; 3663 } 3664 } 3665 if (len == 0) { 3666 /* We don't log zero window probes */ 3667 return; 3668 } 3669 rack->r_ctl.rc_time_last_sent = ts; 3670 if (IN_RECOVERY(tp->t_flags)) { 3671 rack->r_ctl.rc_prr_out += len; 3672 } 3673 /* First question is it a retransmission or new? */ 3674 if (seq_out == snd_max) { 3675 /* Its new */ 3676 again: 3677 rsm = rack_alloc(rack); 3678 if (rsm == NULL) { 3679 /* 3680 * Hmm out of memory and the tcb got destroyed while 3681 * we tried to wait. 3682 */ 3683 return; 3684 } 3685 if (th_flags & TH_FIN) { 3686 rsm->r_flags = RACK_HAS_FIN; 3687 } else { 3688 rsm->r_flags = 0; 3689 } 3690 rsm->r_tim_lastsent[0] = ts; 3691 rsm->r_rtr_cnt = 1; 3692 rsm->r_rtr_bytes = 0; 3693 if (th_flags & TH_SYN) { 3694 /* The data space is one beyond snd_una */ 3695 rsm->r_start = seq_out + 1; 3696 rsm->r_end = rsm->r_start + (len - 1); 3697 } else { 3698 /* Normal case */ 3699 rsm->r_start = seq_out; 3700 rsm->r_end = rsm->r_start + len; 3701 } 3702 rsm->r_dupack = 0; 3703 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 3704 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 3705 #ifdef INVARIANTS 3706 if (insret != NULL) { 3707 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 3708 nrsm, insret, rack, rsm); 3709 } 3710 #endif 3711 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3712 rsm->r_in_tmap = 1; 3713 return; 3714 } 3715 /* 3716 * If we reach here its a retransmission and we need to find it. 3717 */ 3718 memset(&fe, 0, sizeof(fe)); 3719 more: 3720 if (hintrsm && (hintrsm->r_start == seq_out)) { 3721 rsm = hintrsm; 3722 hintrsm = NULL; 3723 } else { 3724 /* No hints sorry */ 3725 rsm = NULL; 3726 } 3727 if ((rsm) && (rsm->r_start == seq_out)) { 3728 seq_out = rack_update_entry(tp, rack, rsm, ts, &len); 3729 if (len == 0) { 3730 return; 3731 } else { 3732 goto more; 3733 } 3734 } 3735 /* Ok it was not the last pointer go through it the hard way. */ 3736 refind: 3737 fe.r_start = seq_out; 3738 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 3739 if (rsm) { 3740 if (rsm->r_start == seq_out) { 3741 seq_out = rack_update_entry(tp, rack, rsm, ts, &len); 3742 if (len == 0) { 3743 return; 3744 } else { 3745 goto refind; 3746 } 3747 } 3748 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 3749 /* Transmitted within this piece */ 3750 /* 3751 * Ok we must split off the front and then let the 3752 * update do the rest 3753 */ 3754 nrsm = rack_alloc_full_limit(rack); 3755 if (nrsm == NULL) { 3756 rack_update_rsm(tp, rack, rsm, ts); 3757 return; 3758 } 3759 /* 3760 * copy rsm to nrsm and then trim the front of rsm 3761 * to not include this part. 
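* For example, if rsm spans 100 - 200 and seq_out is 150, rsm is trimmed to 100 - 150 and nrsm takes 150 - 200; nrsm is then what gets handed to rack_update_entry() below.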
3762 */ 3763 rack_clone_rsm(rack, nrsm, rsm, seq_out); 3764 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 3765 #ifdef INVARIANTS 3766 if (insret != NULL) { 3767 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 3768 nrsm, insret, rack, rsm); 3769 } 3770 #endif 3771 if (rsm->r_in_tmap) { 3772 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 3773 nrsm->r_in_tmap = 1; 3774 } 3775 rsm->r_flags &= (~RACK_HAS_FIN); 3776 seq_out = rack_update_entry(tp, rack, nrsm, ts, &len); 3777 if (len == 0) { 3778 return; 3779 } 3780 } 3781 } 3782 /* 3783 * Hmm not found in map did they retransmit both old and on into the 3784 * new? 3785 */ 3786 if (seq_out == tp->snd_max) { 3787 goto again; 3788 } else if (SEQ_LT(seq_out, tp->snd_max)) { 3789 #ifdef INVARIANTS 3790 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 3791 seq_out, len, tp->snd_una, tp->snd_max); 3792 printf("Starting Dump of all rack entries\n"); 3793 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 3794 printf("rsm:%p start:%u end:%u\n", 3795 rsm, rsm->r_start, rsm->r_end); 3796 } 3797 printf("Dump complete\n"); 3798 panic("seq_out not found rack:%p tp:%p", 3799 rack, tp); 3800 #endif 3801 } else { 3802 #ifdef INVARIANTS 3803 /* 3804 * Hmm beyond sndmax? (only if we are using the new rtt-pack 3805 * flag) 3806 */ 3807 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 3808 seq_out, len, tp->snd_max, tp); 3809 #endif 3810 } 3811 } 3812 3813 /* 3814 * Record one of the RTT updates from an ack into 3815 * our sample structure. 3816 */ 3817 static void 3818 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt) 3819 { 3820 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 3821 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 3822 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 3823 } 3824 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 3825 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 3826 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 3827 } 3828 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 3829 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 3830 rack->r_ctl.rack_rs.rs_rtt_cnt++; 3831 } 3832 3833 /* 3834 * Collect new round-trip time estimate 3835 * and update averages and current timeout. 3836 */ 3837 static void 3838 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 3839 { 3840 int32_t delta; 3841 uint32_t o_srtt, o_var; 3842 int32_t rtt; 3843 3844 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 3845 /* No valid sample */ 3846 return; 3847 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 3848 /* We are to use the lowest RTT seen in a single ack */ 3849 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 3850 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 3851 /* We are to use the highest RTT seen in a single ack */ 3852 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 3853 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 3854 /* We are to use the average RTT seen in a single ack */ 3855 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 3856 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 3857 } else { 3858 #ifdef INVARIANTS 3859 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 3860 #endif 3861 return; 3862 } 3863 if (rtt == 0) 3864 rtt = 1; 3865 rack_log_rtt_sample(rack, rtt); 3866 o_srtt = tp->t_srtt; 3867 o_var = tp->t_rttvar; 3868 rack = (struct tcp_rack *)tp->t_fb_ptr; 3869 if (tp->t_srtt != 0) { 3870 /* 3871 * srtt is stored as fixed point with 5 bits after the 3872 * binary point (i.e., scaled by 8). 
The following magic is 3873 * equivalent to the smoothing algorithm in rfc793 with an 3874 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point). 3875 * Adjust rtt to origin 0. 3876 */ 3877 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3878 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3879 3880 tp->t_srtt += delta; 3881 if (tp->t_srtt <= 0) 3882 tp->t_srtt = 1; 3883 3884 /* 3885 * We accumulate a smoothed rtt variance (actually, a 3886 * smoothed mean difference), then set the retransmit timer 3887 * to smoothed rtt + 4 times the smoothed variance. rttvar 3888 * is stored as fixed point with 4 bits after the binary 3889 * point (scaled by 16). The following is equivalent to 3890 * rfc793 smoothing with an alpha of .75 (rttvar = 3891 * rttvar*3/4 + |delta| / 4). This replaces rfc793's 3892 * wired-in beta. 3893 */ 3894 if (delta < 0) 3895 delta = -delta; 3896 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3897 tp->t_rttvar += delta; 3898 if (tp->t_rttvar <= 0) 3899 tp->t_rttvar = 1; 3900 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3901 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3902 } else { 3903 /* 3904 * No rtt measurement yet - use the unsmoothed rtt. Set the 3905 * variance to half the rtt (so our first retransmit happens 3906 * at 3*rtt). 3907 */ 3908 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3909 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3910 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3911 } 3912 TCPSTAT_INC(tcps_rttupdated); 3913 rack_log_rtt_upd(tp, rack, rtt, o_srtt, o_var); 3914 tp->t_rttupdated++; 3915 #ifdef NETFLIX_STATS 3916 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 3917 #endif 3918 tp->t_rxtshift = 0; 3919 3920 /* 3921 * the retransmit should happen at rtt + 4 * rttvar. Because of the 3922 * way we do the smoothing, srtt and rttvar will each average +1/2 3923 * tick of bias. When we compute the retransmit timer, we want 1/2 3924 * tick of rounding and 1 extra tick because of +-1/2 tick 3925 * uncertainty in the firing of the timer. The bias will give us 3926 * exactly the 1.5 tick we need. But, because the bias is 3927 * statistical, we have to test that we don't drop below the minimum 3928 * feasible timer (which is 2 ticks). 3929 */ 3930 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3931 max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max)); 3932 tp->t_softerror = 0; 3933 } 3934 3935 static void 3936 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm, 3937 uint32_t t, uint32_t cts) 3938 { 3939 /* 3940 * For this RSM, we acknowledged the data from a previous 3941 * transmission, not the last one we made. This means we did a false 3942 * retransmit. 3943 */ 3944 struct tcp_rack *rack; 3945 3946 if (rsm->r_flags & RACK_HAS_FIN) { 3947 /* 3948 * The sending of the FIN often is multiple sent when we 3949 * have everything outstanding ack'd. We ignore this case 3950 * since its over now. 3951 */ 3952 return; 3953 } 3954 if (rsm->r_flags & RACK_TLP) { 3955 /* 3956 * We expect TLP's to have this occur. 3957 */ 3958 return; 3959 } 3960 rack = (struct tcp_rack *)tp->t_fb_ptr; 3961 /* should we undo cc changes and exit recovery? 
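* Only if this rsm is the very one that triggered the current recovery (rc_rsm_start matches its r_start) do we roll cwnd and ssthresh back to the values saved when recovery was entered.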
*/ 3962 if (IN_RECOVERY(tp->t_flags)) { 3963 if (rack->r_ctl.rc_rsm_start == rsm->r_start) { 3964 /* 3965 * Undo what we ratched down and exit recovery if 3966 * possible 3967 */ 3968 EXIT_RECOVERY(tp->t_flags); 3969 tp->snd_recover = tp->snd_una; 3970 if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd) 3971 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at; 3972 if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh) 3973 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at; 3974 } 3975 } 3976 if (rsm->r_flags & RACK_WAS_SACKPASS) { 3977 /* 3978 * We retransmitted based on a sack and the earlier 3979 * retransmission ack'd it - re-ordering is occuring. 3980 */ 3981 counter_u64_add(rack_reorder_seen, 1); 3982 rack->r_ctl.rc_reorder_ts = cts; 3983 } 3984 counter_u64_add(rack_badfr, 1); 3985 counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start)); 3986 } 3987 3988 3989 static int 3990 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 3991 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type) 3992 { 3993 int32_t i; 3994 uint32_t t; 3995 3996 if (rsm->r_flags & RACK_ACKED) 3997 /* Already done */ 3998 return (0); 3999 4000 4001 if ((rsm->r_rtr_cnt == 1) || 4002 ((ack_type == CUM_ACKED) && 4003 (to->to_flags & TOF_TS) && 4004 (to->to_tsecr) && 4005 (rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] == to->to_tsecr)) 4006 ) { 4007 /* 4008 * We will only find a matching timestamp if its cum-acked. 4009 * But if its only one retransmission its for-sure matching 4010 * :-) 4011 */ 4012 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 4013 if ((int)t <= 0) 4014 t = 1; 4015 if (!tp->t_rttlow || tp->t_rttlow > t) 4016 tp->t_rttlow = t; 4017 if (!rack->r_ctl.rc_rack_min_rtt || 4018 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4019 rack->r_ctl.rc_rack_min_rtt = t; 4020 if (rack->r_ctl.rc_rack_min_rtt == 0) { 4021 rack->r_ctl.rc_rack_min_rtt = 1; 4022 } 4023 } 4024 tcp_rack_xmit_timer(rack, t + 1); 4025 if ((rsm->r_flags & RACK_TLP) && 4026 (!IN_RECOVERY(tp->t_flags))) { 4027 /* Segment was a TLP and our retrans matched */ 4028 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 4029 rack->r_ctl.rc_rsm_start = tp->snd_max; 4030 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 4031 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 4032 rack_cong_signal(tp, NULL, CC_NDUPACK); 4033 /* 4034 * When we enter recovery we need to assure 4035 * we send one packet. 4036 */ 4037 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4038 rack_log_to_prr(rack, 7); 4039 } 4040 } 4041 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 4042 /* New more recent rack_tmit_time */ 4043 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 4044 rack->rc_rack_rtt = t; 4045 } 4046 return (1); 4047 } 4048 /* 4049 * We clear the soft/rxtshift since we got an ack. 4050 * There is no assurance we will call the commit() function 4051 * so we need to clear these to avoid incorrect handling. 4052 */ 4053 tp->t_rxtshift = 0; 4054 tp->t_softerror = 0; 4055 if ((to->to_flags & TOF_TS) && 4056 (ack_type == CUM_ACKED) && 4057 (to->to_tsecr) && 4058 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 4059 /* 4060 * Now which timestamp does it match? In this block the ACK 4061 * must be coming from a previous transmission. 
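* We walk every recorded send time looking for one equal to the echoed timestamp; a match on anything but the most recent send means an earlier transmission is the one being acked, i.e. a likely false retransmit (see rack_earlier_retran()).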
4062 */ 4063 for (i = 0; i < rsm->r_rtr_cnt; i++) { 4064 if (rsm->r_tim_lastsent[i] == to->to_tsecr) { 4065 t = cts - rsm->r_tim_lastsent[i]; 4066 if ((int)t <= 0) 4067 t = 1; 4068 if ((i + 1) < rsm->r_rtr_cnt) { 4069 /* Likely */ 4070 rack_earlier_retran(tp, rsm, t, cts); 4071 } 4072 if (!tp->t_rttlow || tp->t_rttlow > t) 4073 tp->t_rttlow = t; 4074 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4075 rack->r_ctl.rc_rack_min_rtt = t; 4076 if (rack->r_ctl.rc_rack_min_rtt == 0) { 4077 rack->r_ctl.rc_rack_min_rtt = 1; 4078 } 4079 } 4080 /* 4081 * Note the following calls to 4082 * tcp_rack_xmit_timer() are being commented 4083 * out for now. They give us no more accuracy 4084 * and often lead to a wrong choice. We have 4085 * enough samples that have not been 4086 * retransmitted. I leave the commented out 4087 * code in here in case in the future we 4088 * decide to add it back (though I can't foresee 4089 * doing that). That way we will easily see 4090 * where they need to be placed. 4091 */ 4092 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 4093 rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 4094 /* New more recent rack_tmit_time */ 4095 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 4096 rack->rc_rack_rtt = t; 4097 } 4098 return (1); 4099 } 4100 } 4101 goto ts_not_found; 4102 } else { 4103 /* 4104 * Ok its a SACK block that we retransmitted, or a Windows 4105 * machine without timestamps. We can tell nothing from the 4106 * time-stamp since its not there or the time the peer last 4107 * received a segment that moved forward its cum-ack point. 4108 */ 4109 ts_not_found: 4110 i = rsm->r_rtr_cnt - 1; 4111 t = cts - rsm->r_tim_lastsent[i]; 4112 if ((int)t <= 0) 4113 t = 1; 4114 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4115 /* 4116 * We retransmitted and the ack came back in less 4117 * than the smallest rtt we have observed. We most 4118 * likely did an improper retransmit as outlined in 4119 * 4.2 Step 3 point 2 in the rack-draft. 4120 */ 4121 i = rsm->r_rtr_cnt - 2; 4122 t = cts - rsm->r_tim_lastsent[i]; 4123 rack_earlier_retran(tp, rsm, t, cts); 4124 } else if (rack->r_ctl.rc_rack_min_rtt) { 4125 /* 4126 * We retransmitted it and the retransmit did the 4127 * job. 4128 */ 4129 if (!rack->r_ctl.rc_rack_min_rtt || 4130 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4131 rack->r_ctl.rc_rack_min_rtt = t; 4132 if (rack->r_ctl.rc_rack_min_rtt == 0) { 4133 rack->r_ctl.rc_rack_min_rtt = 1; 4134 } 4135 } 4136 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) { 4137 /* New more recent rack_tmit_time */ 4138 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i]; 4139 rack->rc_rack_rtt = t; 4140 } 4141 return (1); 4142 } 4143 } 4144 return (0); 4145 } 4146 4147 /* 4148 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 4149 */ 4150 static void 4151 rack_log_sack_passed(struct tcpcb *tp, 4152 struct tcp_rack *rack, struct rack_sendmap *rsm) 4153 { 4154 struct rack_sendmap *nrsm; 4155 4156 nrsm = rsm; 4157 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 4158 rack_head, r_tnext) { 4159 if (nrsm == rsm) { 4160 /* Skip the original segment; it is acked */ 4161 continue; 4162 } 4163 if (nrsm->r_flags & RACK_ACKED) { 4164 /* 4165 * Skip ack'd segments, though we 4166 * should not see these, since tmap 4167 * should not have ack'd segments.
4168 */ 4169 continue; 4170 } 4171 if (nrsm->r_flags & RACK_SACK_PASSED) { 4172 /* 4173 * We found one that is already marked 4174 * passed, we have been here before and 4175 * so all others below this are marked. 4176 */ 4177 break; 4178 } 4179 nrsm->r_flags |= RACK_SACK_PASSED; 4180 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 4181 } 4182 } 4183 4184 static uint32_t 4185 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 4186 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 4187 { 4188 uint32_t start, end, changed = 0; 4189 struct rack_sendmap stack_map; 4190 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next; 4191 int32_t used_ref = 1; 4192 int moved = 0; 4193 4194 start = sack->start; 4195 end = sack->end; 4196 rsm = *prsm; 4197 memset(&fe, 0, sizeof(fe)); 4198 do_rest_ofb: 4199 if ((rsm == NULL) || 4200 (SEQ_LT(end, rsm->r_start)) || 4201 (SEQ_GEQ(start, rsm->r_end)) || 4202 (SEQ_LT(start, rsm->r_start))) { 4203 /* 4204 * We are not in the right spot, 4205 * find the correct spot in the tree. 4206 */ 4207 used_ref = 0; 4208 fe.r_start = start; 4209 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4210 moved++; 4211 } 4212 if (rsm == NULL) { 4213 /* TSNH */ 4214 goto out; 4215 } 4216 /* Ok we have an ACK for some piece of this rsm */ 4217 if (rsm->r_start != start) { 4218 if ((rsm->r_flags & RACK_ACKED) == 0) { 4219 /** 4220 * Need to split this in two pieces the before and after, 4221 * the before remains in the map, the after must be 4222 * added. In other words we have: 4223 * rsm |--------------| 4224 * sackblk |-------> 4225 * rsm will become 4226 * rsm |---| 4227 * and nrsm will be the sacked piece 4228 * nrsm |----------| 4229 * 4230 * But before we start down that path lets 4231 * see if the sack spans over on top of 4232 * the next guy and it is already sacked. 4233 */ 4234 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4235 if (next && (next->r_flags & RACK_ACKED) && 4236 SEQ_GEQ(end, next->r_start)) { 4237 /** 4238 * So the next one is already acked, and 4239 * we can thus by hookery use our stack_map 4240 * to reflect the piece being sacked and 4241 * then adjust the two tree entries moving 4242 * the start and ends around. So we start like: 4243 * rsm |------------| (not-acked) 4244 * next |-----------| (acked) 4245 * sackblk |--------> 4246 * We want to end like so: 4247 * rsm |------| (not-acked) 4248 * next |-----------------| (acked) 4249 * nrsm |-----| 4250 * Where nrsm is a temporary stack piece we 4251 * use to update all the gizmos. 4252 */ 4253 /* Copy up our fudge block */ 4254 nrsm = &stack_map; 4255 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 4256 /* Now adjust our tree blocks */ 4257 rsm->r_end = start; 4258 next->r_start = start; 4259 /* Clear out the dup ack count of the remainder */ 4260 rsm->r_dupack = 0; 4261 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 4262 /* Now lets make sure our fudge block is right */ 4263 nrsm->r_start = start; 4264 /* Now lets update all the stats and such */ 4265 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED); 4266 changed += (nrsm->r_end - nrsm->r_start); 4267 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 4268 if (nrsm->r_flags & RACK_SACK_PASSED) { 4269 counter_u64_add(rack_reorder_seen, 1); 4270 rack->r_ctl.rc_reorder_ts = cts; 4271 } 4272 /* 4273 * Now we want to go up from rsm (the 4274 * one left un-acked) to the next one 4275 * in the tmap. 
We do this so when 4276 * we walk backwards we include marking 4277 * sack-passed on rsm (The one passed in 4278 * is skipped since it is generally called 4279 * on something sacked before removing it 4280 * from the tmap). 4281 */ 4282 if (rsm->r_in_tmap) { 4283 nrsm = TAILQ_NEXT(rsm, r_tnext); 4284 /* 4285 * Now that we have the next 4286 * one walk backwards from there. 4287 */ 4288 if (nrsm && nrsm->r_in_tmap) 4289 rack_log_sack_passed(tp, rack, nrsm); 4290 } 4291 /* Now are we done? */ 4292 if (SEQ_LT(end, next->r_end) || 4293 (end == next->r_end)) { 4294 /* Done with block */ 4295 goto out; 4296 } 4297 counter_u64_add(rack_sack_used_next_merge, 1); 4298 /* Position for the next block */ 4299 start = next->r_end; 4300 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 4301 if (rsm == NULL) 4302 goto out; 4303 } else { 4304 /** 4305 * We can't use any hookery here, so we 4306 * need to split the map. We enter like 4307 * so: 4308 * rsm |--------| 4309 * sackblk |-----> 4310 * We will add the new block nrsm and 4311 * that will be the new portion, and then 4312 * fall through after resetting rsm. So we 4313 * split and look like this: 4314 * rsm |----| 4315 * sackblk |-----> 4316 * nrsm |---| 4317 * We then fall through resetting 4318 * rsm to nrsm, so the next block 4319 * picks it up. 4320 */ 4321 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 4322 if (nrsm == NULL) { 4323 /* 4324 * failed XXXrrs what can we do but lose the sack 4325 * info? 4326 */ 4327 goto out; 4328 } 4329 counter_u64_add(rack_sack_splits, 1); 4330 rack_clone_rsm(rack, nrsm, rsm, start); 4331 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 4332 #ifdef INVARIANTS 4333 if (insret != NULL) { 4334 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 4335 nrsm, insret, rack, rsm); 4336 } 4337 #endif 4338 if (rsm->r_in_tmap) { 4339 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 4340 nrsm->r_in_tmap = 1; 4341 } 4342 rsm->r_flags &= (~RACK_HAS_FIN); 4343 /* Position us to point to the new nrsm that starts the sack blk */ 4344 rsm = nrsm; 4345 } 4346 } else { 4347 /* Already sacked this piece */ 4348 counter_u64_add(rack_sack_skipped_acked, 1); 4349 moved++; 4350 if (end == rsm->r_end) { 4351 /* Done with block */ 4352 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4353 goto out; 4354 } else if (SEQ_LT(end, rsm->r_end)) { 4355 /* A partial sack to an already sacked block */ 4356 moved++; 4357 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4358 goto out; 4359 } else { 4360 /* 4361 * The end goes beyond this guy 4362 * reposition the start to the 4363 * next block. 4364 */ 4365 start = rsm->r_end; 4366 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4367 if (rsm == NULL) 4368 goto out; 4369 } 4370 } 4371 } 4372 if (SEQ_GEQ(end, rsm->r_end)) { 4373 /** 4374 * The end of this block is either beyond this guy or right 4375 * at this guy. I.e.: 4376 * rsm --- |-----| 4377 * end |-----| 4378 * <or> 4379 * end |---------| 4380 */ 4381 if (rsm->r_flags & RACK_TLP) 4382 rack->r_ctl.rc_tlp_rtx_out = 0; 4383 if ((rsm->r_flags & RACK_ACKED) == 0) { 4384 rack_update_rtt(tp, rack, rsm, to, cts, SACKED); 4385 changed += (rsm->r_end - rsm->r_start); 4386 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 4387 if (rsm->r_in_tmap) /* should be true */ 4388 rack_log_sack_passed(tp, rack, rsm); 4389 /* Is Reordering occurring?
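* If SACK_PASSED was set here, a segment sent after this one was SACKed first; seeing this one acked now is treated as evidence of reordering (rc_reorder_ts is noted) rather than loss.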
*/ 4390 if (rsm->r_flags & RACK_SACK_PASSED) { 4391 rsm->r_flags &= ~RACK_SACK_PASSED; 4392 counter_u64_add(rack_reorder_seen, 1); 4393 rack->r_ctl.rc_reorder_ts = cts; 4394 } 4395 rsm->r_flags |= RACK_ACKED; 4396 rsm->r_flags &= ~RACK_TLP; 4397 if (rsm->r_in_tmap) { 4398 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4399 rsm->r_in_tmap = 0; 4400 } 4401 } else { 4402 counter_u64_add(rack_sack_skipped_acked, 1); 4403 moved++; 4404 } 4405 if (end == rsm->r_end) { 4406 /* This block only - done, setup for next */ 4407 goto out; 4408 } 4409 /* 4410 * There is more not coverend by this rsm move on 4411 * to the next block in the RB tree. 4412 */ 4413 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4414 start = rsm->r_end; 4415 rsm = nrsm; 4416 if (rsm == NULL) 4417 goto out; 4418 goto do_rest_ofb; 4419 } 4420 /** 4421 * The end of this sack block is smaller than 4422 * our rsm i.e.: 4423 * rsm --- |-----| 4424 * end |--| 4425 */ 4426 if ((rsm->r_flags & RACK_ACKED) == 0) { 4427 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4428 if (prev && (prev->r_flags & RACK_ACKED)) { 4429 /** 4430 * Goal, we want the right remainder of rsm to shrink 4431 * in place and span from (rsm->r_start = end) to rsm->r_end. 4432 * We want to expand prev to go all the way 4433 * to prev->r_end <- end. 4434 * so in the tree we have before: 4435 * prev |--------| (acked) 4436 * rsm |-------| (non-acked) 4437 * sackblk |-| 4438 * We churn it so we end up with 4439 * prev |----------| (acked) 4440 * rsm |-----| (non-acked) 4441 * nrsm |-| (temporary) 4442 */ 4443 nrsm = &stack_map; 4444 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 4445 prev->r_end = end; 4446 rsm->r_start = end; 4447 /* Now adjust nrsm (stack copy) to be 4448 * the one that is the small 4449 * piece that was "sacked". 4450 */ 4451 nrsm->r_end = end; 4452 rsm->r_dupack = 0; 4453 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 4454 /* 4455 * Now nrsm is our new little piece 4456 * that is acked (which was merged 4457 * to prev). Update the rtt and changed 4458 * based on that. Also check for reordering. 4459 */ 4460 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED); 4461 changed += (nrsm->r_end - nrsm->r_start); 4462 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 4463 if (nrsm->r_flags & RACK_SACK_PASSED) { 4464 counter_u64_add(rack_reorder_seen, 1); 4465 rack->r_ctl.rc_reorder_ts = cts; 4466 } 4467 rsm = prev; 4468 counter_u64_add(rack_sack_used_prev_merge, 1); 4469 } else { 4470 /** 4471 * This is the case where our previous 4472 * block is not acked either, so we must 4473 * split the block in two. 4474 */ 4475 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 4476 if (nrsm == NULL) { 4477 /* failed rrs what can we do but loose the sack info? */ 4478 goto out; 4479 } 4480 /** 4481 * In this case nrsm becomes 4482 * nrsm->r_start = end; 4483 * nrsm->r_end = rsm->r_end; 4484 * which is un-acked. 4485 * <and> 4486 * rsm->r_end = nrsm->r_start; 4487 * i.e. the remaining un-acked 4488 * piece is left on the left 4489 * hand side. 
4490 * 4491 * So we start like this 4492 * rsm |----------| (not acked) 4493 * sackblk |---| 4494 * build it so we have 4495 * rsm |---| (acked) 4496 * nrsm |------| (not acked) 4497 */ 4498 counter_u64_add(rack_sack_splits, 1); 4499 rack_clone_rsm(rack, nrsm, rsm, end); 4500 rsm->r_flags &= (~RACK_HAS_FIN); 4501 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 4502 #ifdef INVARIANTS 4503 if (insret != NULL) { 4504 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 4505 nrsm, insret, rack, rsm); 4506 } 4507 #endif 4508 if (rsm->r_in_tmap) { 4509 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 4510 nrsm->r_in_tmap = 1; 4511 } 4512 nrsm->r_dupack = 0; 4513 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 4514 if (rsm->r_flags & RACK_TLP) 4515 rack->r_ctl.rc_tlp_rtx_out = 0; 4516 rack_update_rtt(tp, rack, rsm, to, cts, SACKED); 4517 changed += (rsm->r_end - rsm->r_start); 4518 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 4519 if (rsm->r_in_tmap) /* should be true */ 4520 rack_log_sack_passed(tp, rack, rsm); 4521 /* Is Reordering occuring? */ 4522 if (rsm->r_flags & RACK_SACK_PASSED) { 4523 rsm->r_flags &= ~RACK_SACK_PASSED; 4524 counter_u64_add(rack_reorder_seen, 1); 4525 rack->r_ctl.rc_reorder_ts = cts; 4526 } 4527 rsm->r_flags |= RACK_ACKED; 4528 rsm->r_flags &= ~RACK_TLP; 4529 if (rsm->r_in_tmap) { 4530 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4531 rsm->r_in_tmap = 0; 4532 } 4533 } 4534 } else if (start != end){ 4535 /* 4536 * The block was already acked. 4537 */ 4538 counter_u64_add(rack_sack_skipped_acked, 1); 4539 moved++; 4540 } 4541 out: 4542 if (rsm && (rsm->r_flags & RACK_ACKED)) { 4543 /* 4544 * Now can we merge where we worked 4545 * with either the previous or 4546 * next block? 4547 */ 4548 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4549 while (next) { 4550 if (next->r_flags & RACK_ACKED) { 4551 /* yep this and next can be merged */ 4552 rsm = rack_merge_rsm(rack, rsm, next); 4553 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4554 } else 4555 break; 4556 } 4557 /* Now what about the previous? */ 4558 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4559 while (prev) { 4560 if (prev->r_flags & RACK_ACKED) { 4561 /* yep the previous and this can be merged */ 4562 rsm = rack_merge_rsm(rack, prev, rsm); 4563 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4564 } else 4565 break; 4566 } 4567 } 4568 if (used_ref == 0) { 4569 counter_u64_add(rack_sack_proc_all, 1); 4570 } else { 4571 counter_u64_add(rack_sack_proc_short, 1); 4572 } 4573 /* Save off the next one for quick reference. */ 4574 if (rsm) 4575 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4576 else 4577 nrsm = NULL; 4578 *prsm = rack->r_ctl.rc_sacklast = nrsm; 4579 /* Pass back the moved. 
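* The caller folds this count into the SACK-attack detection heuristics (sack_moved_extra and friends).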
*/ 4580 *moved_two = moved; 4581 return (changed); 4582 } 4583 4584 static void inline 4585 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 4586 { 4587 struct rack_sendmap *tmap; 4588 4589 tmap = NULL; 4590 while (rsm && (rsm->r_flags & RACK_ACKED)) { 4591 /* Its no longer sacked, mark it so */ 4592 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 4593 #ifdef INVARIANTS 4594 if (rsm->r_in_tmap) { 4595 panic("rack:%p rsm:%p flags:0x%x in tmap?", 4596 rack, rsm, rsm->r_flags); 4597 } 4598 #endif 4599 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 4600 /* Rebuild it into our tmap */ 4601 if (tmap == NULL) { 4602 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4603 tmap = rsm; 4604 } else { 4605 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 4606 tmap = rsm; 4607 } 4608 tmap->r_in_tmap = 1; 4609 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4610 } 4611 /* 4612 * Now lets possibly clear the sack filter so we start 4613 * recognizing sacks that cover this area. 4614 */ 4615 if (rack_use_sack_filter) 4616 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 4617 4618 } 4619 4620 static void 4621 rack_do_decay(struct tcp_rack *rack) 4622 { 4623 struct timeval res; 4624 4625 #define timersub(tvp, uvp, vvp) \ 4626 do { \ 4627 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 4628 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 4629 if ((vvp)->tv_usec < 0) { \ 4630 (vvp)->tv_sec--; \ 4631 (vvp)->tv_usec += 1000000; \ 4632 } \ 4633 } while (0) 4634 4635 timersub(&rack->r_ctl.rc_last_ack, &rack->r_ctl.rc_last_time_decay, &res); 4636 #undef timersub 4637 4638 rack->r_ctl.input_pkt++; 4639 if ((rack->rc_in_persist) || 4640 (res.tv_sec >= 1) || 4641 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 4642 /* 4643 * Check for decay of non-SAD, 4644 * we want all SAD detection metrics to 4645 * decay 1/4 per second (or more) passed. 4646 */ 4647 uint32_t pkt_delta; 4648 4649 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 4650 /* Update our saved tracking values */ 4651 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 4652 rack->r_ctl.rc_last_time_decay = rack->r_ctl.rc_last_ack; 4653 /* Now do we escape without decay? */ 4654 if (rack->rc_in_persist || 4655 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 4656 (pkt_delta < tcp_sad_low_pps)){ 4657 /* 4658 * We don't decay idle connections 4659 * or ones that have a low input pps. 
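* Everything else falls through to ctf_decay_count() below, which shrinks the ack/sack/move counters toward the 1/4-per-second decay goal described above.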
4660 */ 4661 return; 4662 } 4663 /* Decay the counters */ 4664 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 4665 tcp_sad_decay_val); 4666 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 4667 tcp_sad_decay_val); 4668 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 4669 tcp_sad_decay_val); 4670 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 4671 tcp_sad_decay_val); 4672 } 4673 } 4674 4675 static void 4676 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th) 4677 { 4678 uint32_t changed, entered_recovery = 0; 4679 struct tcp_rack *rack; 4680 struct rack_sendmap *rsm, *rm; 4681 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 4682 register uint32_t th_ack; 4683 int32_t i, j, k, num_sack_blks = 0; 4684 uint32_t cts, acked, ack_point, sack_changed = 0; 4685 int loop_start = 0, moved_two = 0; 4686 4687 INP_WLOCK_ASSERT(tp->t_inpcb); 4688 if (th->th_flags & TH_RST) { 4689 /* We don't log resets */ 4690 return; 4691 } 4692 rack = (struct tcp_rack *)tp->t_fb_ptr; 4693 cts = tcp_ts_getticks(); 4694 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 4695 changed = 0; 4696 th_ack = th->th_ack; 4697 if (rack->sack_attack_disable == 0) 4698 rack_do_decay(rack); 4699 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 4700 /* 4701 * You only get credit for 4702 * MSS and greater (and you get extra 4703 * credit for larger cum-ack moves). 4704 */ 4705 int ac; 4706 4707 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 4708 rack->r_ctl.ack_count += ac; 4709 counter_u64_add(rack_ack_total, ac); 4710 } 4711 if (rack->r_ctl.ack_count > 0xfff00000) { 4712 /* 4713 * reduce the number to keep us under 4714 * a uint32_t. 4715 */ 4716 rack->r_ctl.ack_count /= 2; 4717 rack->r_ctl.sack_count /= 2; 4718 } 4719 if (SEQ_GT(th_ack, tp->snd_una)) { 4720 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 4721 tp->t_acktime = ticks; 4722 } 4723 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 4724 changed = th_ack - rsm->r_start; 4725 if (changed) { 4726 /* 4727 * The ACK point is advancing to th_ack, we must drop off 4728 * the packets in the rack log and calculate any eligble 4729 * RTT's. 4730 */ 4731 rack->r_wanted_output++; 4732 more: 4733 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 4734 if (rsm == NULL) { 4735 if ((th_ack - 1) == tp->iss) { 4736 /* 4737 * For the SYN incoming case we will not 4738 * have called tcp_output for the sending of 4739 * the SYN, so there will be no map. All 4740 * other cases should probably be a panic. 4741 */ 4742 goto proc_sack; 4743 } 4744 if (tp->t_flags & TF_SENTFIN) { 4745 /* if we send a FIN we will not hav a map */ 4746 goto proc_sack; 4747 } 4748 #ifdef INVARIANTS 4749 panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n", 4750 tp, 4751 th, tp->t_state, rack, 4752 tp->snd_una, tp->snd_max, tp->snd_nxt, changed); 4753 #endif 4754 goto proc_sack; 4755 } 4756 if (SEQ_LT(th_ack, rsm->r_start)) { 4757 /* Huh map is missing this */ 4758 #ifdef INVARIANTS 4759 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 4760 rsm->r_start, 4761 th_ack, tp->t_state, rack->r_state); 4762 #endif 4763 goto proc_sack; 4764 } 4765 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED); 4766 /* Now do we consume the whole thing? */ 4767 if (SEQ_GEQ(th_ack, rsm->r_end)) { 4768 /* Its all consumed. 
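* The cum-ack covers this entire rsm: remove it from the tree and the tmap, credit its retransmitted bytes back against rc_holes_rxt, and free it; a non-zero left means the ack covers further map entries as well.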
*/ 4769 uint32_t left; 4770 4771 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 4772 rsm->r_rtr_bytes = 0; 4773 if (rsm->r_flags & RACK_TLP) 4774 rack->r_ctl.rc_tlp_rtx_out = 0; 4775 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4776 #ifdef INVARIANTS 4777 if (rm != rsm) { 4778 panic("removing head in rack:%p rsm:%p rm:%p", 4779 rack, rsm, rm); 4780 } 4781 #endif 4782 if (rsm->r_in_tmap) { 4783 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4784 rsm->r_in_tmap = 0; 4785 } 4786 if (rsm->r_flags & RACK_ACKED) { 4787 /* 4788 * It was acked on the scoreboard -- remove 4789 * it from total 4790 */ 4791 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 4792 } else if (rsm->r_flags & RACK_SACK_PASSED) { 4793 /* 4794 * There are segments ACKED on the 4795 * scoreboard further up. We are seeing 4796 * reordering. 4797 */ 4798 rsm->r_flags &= ~RACK_SACK_PASSED; 4799 counter_u64_add(rack_reorder_seen, 1); 4800 rsm->r_flags |= RACK_ACKED; 4801 rack->r_ctl.rc_reorder_ts = cts; 4802 } 4803 left = th_ack - rsm->r_end; 4804 if (rsm->r_rtr_cnt > 1) { 4805 /* 4806 * Technically we should make r_rtr_cnt be 4807 * monotonicly increasing and just mod it to 4808 * the timestamp it is replacing.. that way 4809 * we would have the last 3 retransmits. Now 4810 * rc_loss_count will be wrong if we 4811 * retransmit something more than 2 times in 4812 * recovery :( 4813 */ 4814 rack->r_ctl.rc_loss_count += (rsm->r_rtr_cnt - 1); 4815 } 4816 /* Free back to zone */ 4817 rack_free(rack, rsm); 4818 if (left) { 4819 goto more; 4820 } 4821 goto proc_sack; 4822 } 4823 if (rsm->r_flags & RACK_ACKED) { 4824 /* 4825 * It was acked on the scoreboard -- remove it from 4826 * total for the part being cum-acked. 4827 */ 4828 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 4829 } 4830 /* 4831 * Clear the dup ack count for 4832 * the piece that remains. 4833 */ 4834 rsm->r_dupack = 0; 4835 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 4836 if (rsm->r_rtr_bytes) { 4837 /* 4838 * It was retransmitted adjust the 4839 * sack holes for what was acked. 4840 */ 4841 int ack_am; 4842 4843 ack_am = (th_ack - rsm->r_start); 4844 if (ack_am >= rsm->r_rtr_bytes) { 4845 rack->r_ctl.rc_holes_rxt -= ack_am; 4846 rsm->r_rtr_bytes -= ack_am; 4847 } 4848 } 4849 /* Update where the piece starts */ 4850 rsm->r_start = th_ack; 4851 } 4852 proc_sack: 4853 /* Check for reneging */ 4854 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 4855 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 4856 /* 4857 * The peer has moved snd_una up to 4858 * the edge of this send, i.e. one 4859 * that it had previously acked. The only 4860 * way that can be true if the peer threw 4861 * away data (space issues) that it had 4862 * previously sacked (else it would have 4863 * given us snd_una up to (rsm->r_end). 4864 * We need to undo the acked markings here. 4865 * 4866 * Note we have to look to make sure th_ack is 4867 * our rsm->r_start in case we get an old ack 4868 * where th_ack is behind snd_una. 
4869 */ 4870 rack_peer_reneges(rack, rsm, th->th_ack); 4871 } 4872 if ((to->to_flags & TOF_SACK) == 0) { 4873 /* We are done nothing left */ 4874 goto out; 4875 } 4876 /* Sack block processing */ 4877 if (SEQ_GT(th_ack, tp->snd_una)) 4878 ack_point = th_ack; 4879 else 4880 ack_point = tp->snd_una; 4881 for (i = 0; i < to->to_nsacks; i++) { 4882 bcopy((to->to_sacks + i * TCPOLEN_SACK), 4883 &sack, sizeof(sack)); 4884 sack.start = ntohl(sack.start); 4885 sack.end = ntohl(sack.end); 4886 if (SEQ_GT(sack.end, sack.start) && 4887 SEQ_GT(sack.start, ack_point) && 4888 SEQ_LT(sack.start, tp->snd_max) && 4889 SEQ_GT(sack.end, ack_point) && 4890 SEQ_LEQ(sack.end, tp->snd_max)) { 4891 sack_blocks[num_sack_blks] = sack; 4892 num_sack_blks++; 4893 #ifdef NETFLIX_STATS 4894 } else if (SEQ_LEQ(sack.start, th_ack) && 4895 SEQ_LEQ(sack.end, th_ack)) { 4896 /* 4897 * Its a D-SACK block. 4898 */ 4899 tcp_record_dsack(sack.start, sack.end); 4900 #endif 4901 } 4902 4903 } 4904 /* 4905 * Sort the SACK blocks so we can update the rack scoreboard with 4906 * just one pass. 4907 */ 4908 if (rack_use_sack_filter) { 4909 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 4910 num_sack_blks, th->th_ack); 4911 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 4912 } 4913 if (num_sack_blks == 0) { 4914 /* Nothing to sack (DSACKs?) */ 4915 goto out_with_totals; 4916 } 4917 if (num_sack_blks < 2) { 4918 /* Only one, we don't need to sort */ 4919 goto do_sack_work; 4920 } 4921 /* Sort the sacks */ 4922 for (i = 0; i < num_sack_blks; i++) { 4923 for (j = i + 1; j < num_sack_blks; j++) { 4924 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 4925 sack = sack_blocks[i]; 4926 sack_blocks[i] = sack_blocks[j]; 4927 sack_blocks[j] = sack; 4928 } 4929 } 4930 } 4931 /* 4932 * Now are any of the sack block ends the same (yes some 4933 * implementations send these)? 4934 */ 4935 again: 4936 if (num_sack_blks == 0) 4937 goto out_with_totals; 4938 if (num_sack_blks > 1) { 4939 for (i = 0; i < num_sack_blks; i++) { 4940 for (j = i + 1; j < num_sack_blks; j++) { 4941 if (sack_blocks[i].end == sack_blocks[j].end) { 4942 /* 4943 * Ok these two have the same end we 4944 * want the smallest end and then 4945 * throw away the larger and start 4946 * again. 4947 */ 4948 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 4949 /* 4950 * The second block covers 4951 * more area use that 4952 */ 4953 sack_blocks[i].start = sack_blocks[j].start; 4954 } 4955 /* 4956 * Now collapse out the dup-sack and 4957 * lower the count 4958 */ 4959 for (k = (j + 1); k < num_sack_blks; k++) { 4960 sack_blocks[j].start = sack_blocks[k].start; 4961 sack_blocks[j].end = sack_blocks[k].end; 4962 j++; 4963 } 4964 num_sack_blks--; 4965 goto again; 4966 } 4967 } 4968 } 4969 } 4970 do_sack_work: 4971 /* 4972 * First lets look to see if 4973 * we have retransmitted and 4974 * can use the transmit next? 4975 */ 4976 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 4977 if (rsm && 4978 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 4979 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 4980 /* 4981 * We probably did the FR and the next 4982 * SACK in continues as we would expect. 
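* Handling that common case first lets us match the block against the head of the tmap directly; when the sack-filter has collapsed things to a single block we can then skip the per-block loop below entirely.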
4983 */ 4984 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 4985 if (acked) { 4986 rack->r_wanted_output++; 4987 changed += acked; 4988 sack_changed += acked; 4989 } 4990 if (num_sack_blks == 1) { 4991 /* 4992 * This is what we would expect from 4993 * a normal implementation to happen 4994 * after we have retransmitted the FR, 4995 * i.e the sack-filter pushes down 4996 * to 1 block and the next to be retransmitted 4997 * is the sequence in the sack block (has more 4998 * are acked). Count this as ACK'd data to boost 4999 * up the chances of recovering any false positives. 5000 */ 5001 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 5002 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 5003 counter_u64_add(rack_express_sack, 1); 5004 if (rack->r_ctl.ack_count > 0xfff00000) { 5005 /* 5006 * reduce the number to keep us under 5007 * a uint32_t. 5008 */ 5009 rack->r_ctl.ack_count /= 2; 5010 rack->r_ctl.sack_count /= 2; 5011 } 5012 goto out_with_totals; 5013 } else { 5014 /* 5015 * Start the loop through the 5016 * rest of blocks, past the first block. 5017 */ 5018 moved_two = 0; 5019 loop_start = 1; 5020 } 5021 } 5022 /* Its a sack of some sort */ 5023 rack->r_ctl.sack_count++; 5024 if (rack->r_ctl.sack_count > 0xfff00000) { 5025 /* 5026 * reduce the number to keep us under 5027 * a uint32_t. 5028 */ 5029 rack->r_ctl.ack_count /= 2; 5030 rack->r_ctl.sack_count /= 2; 5031 } 5032 counter_u64_add(rack_sack_total, 1); 5033 if (rack->sack_attack_disable) { 5034 /* An attacker disablement is in place */ 5035 if (num_sack_blks > 1) { 5036 rack->r_ctl.sack_count += (num_sack_blks - 1); 5037 rack->r_ctl.sack_moved_extra++; 5038 counter_u64_add(rack_move_some, 1); 5039 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 5040 rack->r_ctl.sack_moved_extra /= 2; 5041 rack->r_ctl.sack_noextra_move /= 2; 5042 } 5043 } 5044 goto out; 5045 } 5046 rsm = rack->r_ctl.rc_sacklast; 5047 for (i = loop_start; i < num_sack_blks; i++) { 5048 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 5049 if (acked) { 5050 rack->r_wanted_output++; 5051 changed += acked; 5052 sack_changed += acked; 5053 } 5054 if (moved_two) { 5055 /* 5056 * If we did not get a SACK for at least a MSS and 5057 * had to move at all, or if we moved more than our 5058 * threshold, it counts against the "extra" move. 5059 */ 5060 rack->r_ctl.sack_moved_extra += moved_two; 5061 counter_u64_add(rack_move_some, 1); 5062 } else { 5063 /* 5064 * else we did not have to move 5065 * any more than we would expect. 5066 */ 5067 rack->r_ctl.sack_noextra_move++; 5068 counter_u64_add(rack_move_none, 1); 5069 } 5070 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 5071 /* 5072 * If the SACK was not a full MSS then 5073 * we add to sack_count the number of 5074 * MSS's (or possibly more than 5075 * a MSS if its a TSO send) we had to skip by. 5076 */ 5077 rack->r_ctl.sack_count += moved_two; 5078 counter_u64_add(rack_sack_total, moved_two); 5079 } 5080 /* 5081 * Now we need to setup for the next 5082 * round. First we make sure we won't 5083 * exceed the size of our uint32_t on 5084 * the various counts, and then clear out 5085 * moved_two. 
5086 */ 5087 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 5088 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 5089 rack->r_ctl.sack_moved_extra /= 2; 5090 rack->r_ctl.sack_noextra_move /= 2; 5091 } 5092 if (rack->r_ctl.sack_count > 0xfff00000) { 5093 rack->r_ctl.ack_count /= 2; 5094 rack->r_ctl.sack_count /= 2; 5095 } 5096 moved_two = 0; 5097 } 5098 out_with_totals: 5099 if (num_sack_blks > 1) { 5100 /* 5101 * You get an extra stroke if 5102 * you have more than one sack-blk, this 5103 * could be where we are skipping forward 5104 * and the sack-filter is still working, or 5105 * it could be an attacker constantly 5106 * moving us. 5107 */ 5108 rack->r_ctl.sack_moved_extra++; 5109 counter_u64_add(rack_move_some, 1); 5110 } 5111 out: 5112 #ifdef NETFLIX_EXP_DETECTION 5113 if ((rack->do_detection || tcp_force_detection) && 5114 tcp_sack_to_ack_thresh && 5115 tcp_sack_to_move_thresh && 5116 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 5117 /* 5118 * We have thresholds set to find 5119 * possible attackers and disable sack. 5120 * Check them. 5121 */ 5122 uint64_t ackratio, moveratio, movetotal; 5123 5124 /* Log detecting */ 5125 rack_log_sad(rack, 1); 5126 ackratio = (uint64_t)(rack->r_ctl.sack_count); 5127 ackratio *= (uint64_t)(1000); 5128 if (rack->r_ctl.ack_count) 5129 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 5130 else { 5131 /* We really should not hit here */ 5132 ackratio = 1000; 5133 } 5134 if ((rack->sack_attack_disable == 0) && 5135 (ackratio > rack_highest_sack_thresh_seen)) 5136 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 5137 movetotal = rack->r_ctl.sack_moved_extra; 5138 movetotal += rack->r_ctl.sack_noextra_move; 5139 moveratio = rack->r_ctl.sack_moved_extra; 5140 moveratio *= (uint64_t)1000; 5141 if (movetotal) 5142 moveratio /= movetotal; 5143 else { 5144 /* No moves, thats pretty good */ 5145 moveratio = 0; 5146 } 5147 if ((rack->sack_attack_disable == 0) && 5148 (moveratio > rack_highest_move_thresh_seen)) 5149 rack_highest_move_thresh_seen = (uint32_t)moveratio; 5150 if (rack->sack_attack_disable == 0) { 5151 if ((ackratio > tcp_sack_to_ack_thresh) && 5152 (moveratio > tcp_sack_to_move_thresh)) { 5153 /* Disable sack processing */ 5154 rack->sack_attack_disable = 1; 5155 if (rack->r_rep_attack == 0) { 5156 rack->r_rep_attack = 1; 5157 counter_u64_add(rack_sack_attacks_detected, 1); 5158 } 5159 if (tcp_attack_on_turns_on_logging) { 5160 /* 5161 * Turn on logging, used for debugging 5162 * false positives. 
5163 */ 5164 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 5165 } 5166 /* Clamp the cwnd at flight size */ 5167 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 5168 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5169 rack_log_sad(rack, 2); 5170 } 5171 } else { 5172 /* We are sack-disabled check for false positives */ 5173 if ((ackratio <= tcp_restoral_thresh) || 5174 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 5175 rack->sack_attack_disable = 0; 5176 rack_log_sad(rack, 3); 5177 /* Restart counting */ 5178 rack->r_ctl.sack_count = 0; 5179 rack->r_ctl.sack_moved_extra = 0; 5180 rack->r_ctl.sack_noextra_move = 1; 5181 rack->r_ctl.ack_count = max(1, 5182 (BYTES_THIS_ACK(tp, th)/ctf_fixed_maxseg(rack->rc_tp))); 5183 5184 if (rack->r_rep_reverse == 0) { 5185 rack->r_rep_reverse = 1; 5186 counter_u64_add(rack_sack_attacks_reversed, 1); 5187 } 5188 /* Restore the cwnd */ 5189 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 5190 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 5191 } 5192 } 5193 } 5194 #endif 5195 if (changed) { 5196 /* Something changed cancel the rack timer */ 5197 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5198 } 5199 if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) { 5200 /* 5201 * Ok we have a high probability that we need to go in to 5202 * recovery since we have data sack'd 5203 */ 5204 struct rack_sendmap *rsm; 5205 uint32_t tsused; 5206 5207 tsused = tcp_ts_getticks(); 5208 rsm = tcp_rack_output(tp, rack, tsused); 5209 if (rsm) { 5210 /* Enter recovery */ 5211 rack->r_ctl.rc_rsm_start = rsm->r_start; 5212 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 5213 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 5214 entered_recovery = 1; 5215 rack_cong_signal(tp, NULL, CC_NDUPACK); 5216 /* 5217 * When we enter recovery we need to assure we send 5218 * one packet. 
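 *
 * Seeding rc_prr_sndcnt with one MSS guarantees the first
 * retransmission can leave immediately; after that the block
 * below recomputes the allowance per ACK in the spirit of
 * RFC 6937 proportional rate reduction. Schematically (a
 * condensed sketch of the code below, with the logging and the
 * zero divisor guard left out):
 *
 *	pipe = (snd_max - snd_una) - sacked + holes_rxt;
 *	if (pipe > ssthresh)
 *		sndcnt = (prr_delivered * ssthresh) /
 *		    prr_recovery_fs + 1 - prr_out;
 *	else
 *		sndcnt = min(ssthresh - pipe,
 *		    max(prr_delivered - prr_out, changed) + maxseg);
 *
 * with sndcnt floored at zero in both branches.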
5219 */ 5220 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5221 rack_log_to_prr(rack, 8); 5222 rack->r_timer_override = 1; 5223 } 5224 } 5225 if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) { 5226 /* Deal with changed and PRR here (in recovery only) */ 5227 uint32_t pipe, snd_una; 5228 5229 rack->r_ctl.rc_prr_delivered += changed; 5230 /* Compute prr_sndcnt */ 5231 if (SEQ_GT(tp->snd_una, th_ack)) { 5232 snd_una = tp->snd_una; 5233 } else { 5234 snd_una = th_ack; 5235 } 5236 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 5237 if (pipe > tp->snd_ssthresh) { 5238 long sndcnt; 5239 5240 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 5241 if (rack->r_ctl.rc_prr_recovery_fs > 0) 5242 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 5243 else { 5244 rack->r_ctl.rc_prr_sndcnt = 0; 5245 rack_log_to_prr(rack, 9); 5246 sndcnt = 0; 5247 } 5248 sndcnt++; 5249 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 5250 sndcnt -= rack->r_ctl.rc_prr_out; 5251 else 5252 sndcnt = 0; 5253 rack->r_ctl.rc_prr_sndcnt = sndcnt; 5254 rack_log_to_prr(rack, 10); 5255 } else { 5256 uint32_t limit; 5257 5258 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 5259 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 5260 else 5261 limit = 0; 5262 if (changed > limit) 5263 limit = changed; 5264 limit += ctf_fixed_maxseg(tp); 5265 if (tp->snd_ssthresh > pipe) { 5266 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 5267 rack_log_to_prr(rack, 11); 5268 } else { 5269 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 5270 rack_log_to_prr(rack, 12); 5271 } 5272 } 5273 if (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) { 5274 rack->r_timer_override = 1; 5275 } 5276 } 5277 } 5278 5279 static void 5280 rack_strike_dupack(struct tcp_rack *rack) 5281 { 5282 struct rack_sendmap *rsm; 5283 5284 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5285 if (rsm && (rsm->r_dupack < 0xff)) { 5286 rsm->r_dupack++; 5287 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 5288 rack->r_wanted_output = 1; 5289 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 5290 } else { 5291 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 5292 } 5293 } 5294 } 5295 5296 /* 5297 * Return value of 1, we do not need to call rack_process_data(). 5298 * return value of 0, rack_process_data can be called. 5299 * For ret_val if its 0 the TCP is locked, if its non-zero 5300 * its unlocked and probably unsafe to touch the TCB. 5301 */ 5302 static int 5303 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 5304 struct tcpcb *tp, struct tcpopt *to, 5305 uint32_t tiwin, int32_t tlen, 5306 int32_t * ofia, int32_t thflags, int32_t * ret_val) 5307 { 5308 int32_t ourfinisacked = 0; 5309 int32_t nsegs, acked_amount; 5310 int32_t acked; 5311 struct mbuf *mfree; 5312 struct tcp_rack *rack; 5313 int32_t recovery = 0; 5314 5315 rack = (struct tcp_rack *)tp->t_fb_ptr; 5316 if (SEQ_GT(th->th_ack, tp->snd_max)) { 5317 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 5318 rack->r_wanted_output++; 5319 return (1); 5320 } 5321 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 5322 if (rack->rc_in_persist) 5323 tp->t_rxtshift = 0; 5324 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd)) 5325 rack_strike_dupack(rack); 5326 rack_log_ack(tp, to, th); 5327 } 5328 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 5329 /* 5330 * Old ack, behind (or duplicate to) the last one rcv'd 5331 * Note: Should mark reordering is occuring! 
We should also 5332 * look for sack blocks arriving e.g. ack 1, 4-4 then ack 1, 5333 * 3-3, 4-4 would be reording. As well as ack 1, 3-3 <no 5334 * retran and> ack 3 5335 */ 5336 return (0); 5337 } 5338 /* 5339 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 5340 * something we sent. 5341 */ 5342 if (tp->t_flags & TF_NEEDSYN) { 5343 /* 5344 * T/TCP: Connection was half-synchronized, and our SYN has 5345 * been ACK'd (so connection is now fully synchronized). Go 5346 * to non-starred state, increment snd_una for ACK of SYN, 5347 * and check if we can do window scaling. 5348 */ 5349 tp->t_flags &= ~TF_NEEDSYN; 5350 tp->snd_una++; 5351 /* Do window scaling? */ 5352 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 5353 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 5354 tp->rcv_scale = tp->request_r_scale; 5355 /* Send window already scaled. */ 5356 } 5357 } 5358 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5359 INP_WLOCK_ASSERT(tp->t_inpcb); 5360 5361 acked = BYTES_THIS_ACK(tp, th); 5362 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 5363 TCPSTAT_ADD(tcps_rcvackbyte, acked); 5364 5365 /* 5366 * If we just performed our first retransmit, and the ACK arrives 5367 * within our recovery window, then it was a mistake to do the 5368 * retransmit in the first place. Recover our original cwnd and 5369 * ssthresh, and proceed to transmit where we left off. 5370 */ 5371 if (tp->t_flags & TF_PREVVALID) { 5372 tp->t_flags &= ~TF_PREVVALID; 5373 if (tp->t_rxtshift == 1 && 5374 (int)(ticks - tp->t_badrxtwin) < 0) 5375 rack_cong_signal(tp, th, CC_RTO_ERR); 5376 } 5377 /* 5378 * If we have a timestamp reply, update smoothed round trip time. If 5379 * no timestamp is present but transmit timer is running and timed 5380 * sequence number was acked, update smoothed round trip time. Since 5381 * we now have an rtt measurement, cancel the timer backoff (cf., 5382 * Phil Karn's retransmit alg.). Recompute the initial retransmit 5383 * timer. 5384 * 5385 * Some boxes send broken timestamp replies during the SYN+ACK 5386 * phase, ignore timestamps of 0 or we could calculate a huge RTT 5387 * and blow up the retransmit timer. 5388 */ 5389 /* 5390 * If all outstanding data is acked, stop retransmit timer and 5391 * remember to restart (more output or persist). If there is more 5392 * data to be acked, restart retransmit timer, using current 5393 * (possibly backed-off) value. 5394 */ 5395 if (th->th_ack == tp->snd_max) { 5396 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5397 rack->r_wanted_output++; 5398 } 5399 if (acked == 0) { 5400 if (ofia) 5401 *ofia = ourfinisacked; 5402 return (0); 5403 } 5404 if (rack->r_ctl.rc_early_recovery) { 5405 if (IN_RECOVERY(tp->t_flags)) { 5406 if (SEQ_LT(th->th_ack, tp->snd_recover) && 5407 (SEQ_LT(th->th_ack, tp->snd_max))) { 5408 tcp_rack_partialack(tp, th); 5409 } else { 5410 rack_post_recovery(tp, th); 5411 recovery = 1; 5412 } 5413 } 5414 } 5415 /* 5416 * Let the congestion control algorithm update congestion control 5417 * related information. This typically means increasing the 5418 * congestion window. 5419 */ 5420 rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery); 5421 SOCKBUF_LOCK(&so->so_snd); 5422 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 5423 tp->snd_wnd -= acked_amount; 5424 mfree = sbcut_locked(&so->so_snd, acked_amount); 5425 if ((sbused(&so->so_snd) == 0) && 5426 (acked > acked_amount) && 5427 (tp->t_state >= TCPS_FIN_WAIT_1)) { 5428 ourfinisacked = 1; 5429 } 5430 /* NB: sowwakeup_locked() does an implicit unlock. 
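 *
 * The ourfinisacked test just above relies on sequence space
 * accounting: acked counts every sequence number this ACK covers,
 * while acked_amount is capped at what actually sits in so_snd,
 * so once the send buffer drains, any leftover acked sequence in
 * FIN_WAIT_1 or later can only be our FIN. For example with
 * snd_una = 100, 50 bytes queued and our FIN at 150, an ACK of
 * 151 gives acked = 51 but acked_amount = 50, which marks the
 * FIN as acknowledged.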
*/ 5431 sowwakeup_locked(so); 5432 m_freem(mfree); 5433 if (rack->r_ctl.rc_early_recovery == 0) { 5434 if (IN_RECOVERY(tp->t_flags)) { 5435 if (SEQ_LT(th->th_ack, tp->snd_recover) && 5436 (SEQ_LT(th->th_ack, tp->snd_max))) { 5437 tcp_rack_partialack(tp, th); 5438 } else { 5439 rack_post_recovery(tp, th); 5440 } 5441 } 5442 } 5443 tp->snd_una = th->th_ack; 5444 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 5445 tp->snd_recover = tp->snd_una; 5446 5447 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 5448 tp->snd_nxt = tp->snd_una; 5449 } 5450 if (tp->snd_una == tp->snd_max) { 5451 /* Nothing left outstanding */ 5452 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 5453 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 5454 tp->t_acktime = 0; 5455 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5456 /* Set need output so persist might get set */ 5457 rack->r_wanted_output++; 5458 if (rack_use_sack_filter) 5459 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 5460 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 5461 (sbavail(&so->so_snd) == 0) && 5462 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 5463 /* 5464 * The socket was gone and the 5465 * peer sent data, time to 5466 * reset him. 5467 */ 5468 *ret_val = 1; 5469 tp = tcp_close(tp); 5470 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 5471 return (1); 5472 } 5473 } 5474 if (ofia) 5475 *ofia = ourfinisacked; 5476 return (0); 5477 } 5478 5479 static void 5480 rack_collapsed_window(struct tcp_rack *rack) 5481 { 5482 /* 5483 * Now we must walk the 5484 * send map and divide the 5485 * ones left stranded. These 5486 * guys can't cause us to abort 5487 * the connection and are really 5488 * "unsent". However if a buggy 5489 * client actually did keep some 5490 * of the data i.e. collapsed the win 5491 * and refused to ack and then opened 5492 * the win and acked that data. We would 5493 * get into an ack war, the simplier 5494 * method then of just pretending we 5495 * did not send those segments something 5496 * won't work. 5497 */ 5498 struct rack_sendmap *rsm, *nrsm, fe, *insret; 5499 tcp_seq max_seq; 5500 uint32_t maxseg; 5501 5502 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 5503 maxseg = ctf_fixed_maxseg(rack->rc_tp); 5504 memset(&fe, 0, sizeof(fe)); 5505 fe.r_start = max_seq; 5506 /* Find the first seq past or at maxseq */ 5507 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 5508 if (rsm == NULL) { 5509 /* Nothing to do strange */ 5510 rack->rc_has_collapsed = 0; 5511 return; 5512 } 5513 /* 5514 * Now do we need to split at 5515 * the collapse point? 5516 */ 5517 if (SEQ_GT(max_seq, rsm->r_start)) { 5518 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 5519 if (nrsm == NULL) { 5520 /* We can't get a rsm, mark all? 
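 * If the allocation fails we simply start marking at the rsm that
 * straddles the collapse point, which over-marks at most one
 * partial segment rather than losing track of it. The normal path
 * splits that rsm at max_seq first, e.g. with snd_una = 1000 and
 * snd_wnd = 500 the collapse point is 1500; an rsm covering
 * 1400-1700 is cloned at 1500 so only the 1500-1700 portion is
 * flagged RACK_RWND_COLLAPSED.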
*/ 5521 nrsm = rsm; 5522 goto no_split; 5523 } 5524 /* Clone it */ 5525 rack_clone_rsm(rack, nrsm, rsm, max_seq); 5526 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 5527 #ifdef INVARIANTS 5528 if (insret != NULL) { 5529 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 5530 nrsm, insret, rack, rsm); 5531 } 5532 #endif 5533 if (rsm->r_in_tmap) { 5534 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 5535 nrsm->r_in_tmap = 1; 5536 } 5537 /* 5538 * Set in the new RSM as the 5539 * collapsed starting point 5540 */ 5541 rsm = nrsm; 5542 } 5543 no_split: 5544 counter_u64_add(rack_collapsed_win, 1); 5545 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 5546 nrsm->r_flags |= RACK_RWND_COLLAPSED; 5547 rack->rc_has_collapsed = 1; 5548 } 5549 } 5550 5551 static void 5552 rack_un_collapse_window(struct tcp_rack *rack) 5553 { 5554 struct rack_sendmap *rsm; 5555 5556 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 5557 if (rsm->r_flags & RACK_RWND_COLLAPSED) 5558 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 5559 else 5560 break; 5561 } 5562 rack->rc_has_collapsed = 0; 5563 } 5564 5565 /* 5566 * Return value of 1, the TCB is unlocked and most 5567 * likely gone, return value of 0, the TCP is still 5568 * locked. 5569 */ 5570 static int 5571 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 5572 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 5573 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 5574 { 5575 /* 5576 * Update window information. Don't look at window if no ACK: TAC's 5577 * send garbage on first SYN. 5578 */ 5579 int32_t nsegs; 5580 int32_t tfo_syn; 5581 struct tcp_rack *rack; 5582 5583 rack = (struct tcp_rack *)tp->t_fb_ptr; 5584 INP_WLOCK_ASSERT(tp->t_inpcb); 5585 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5586 if ((thflags & TH_ACK) && 5587 (SEQ_LT(tp->snd_wl1, th->th_seq) || 5588 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 5589 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 5590 /* keep track of pure window updates */ 5591 if (tlen == 0 && 5592 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 5593 TCPSTAT_INC(tcps_rcvwinupd); 5594 tp->snd_wnd = tiwin; 5595 tp->snd_wl1 = th->th_seq; 5596 tp->snd_wl2 = th->th_ack; 5597 if (tp->snd_wnd > tp->max_sndwnd) 5598 tp->max_sndwnd = tp->snd_wnd; 5599 rack->r_wanted_output++; 5600 } else if (thflags & TH_ACK) { 5601 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 5602 tp->snd_wnd = tiwin; 5603 tp->snd_wl1 = th->th_seq; 5604 tp->snd_wl2 = th->th_ack; 5605 } 5606 } 5607 if (tp->snd_wnd < ctf_outstanding(tp)) 5608 /* The peer collapsed the window */ 5609 rack_collapsed_window(rack); 5610 else if (rack->rc_has_collapsed) 5611 rack_un_collapse_window(rack); 5612 /* Was persist timer active and now we have window space? */ 5613 if ((rack->rc_in_persist != 0) && 5614 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 5615 rack->r_ctl.rc_pace_min_segs))) { 5616 rack_exit_persist(tp, rack); 5617 tp->snd_nxt = tp->snd_max; 5618 /* Make sure we output to start the timer */ 5619 rack->r_wanted_output++; 5620 } 5621 /* Do we enter persists? 
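 * Condensed, the test below is (illustrative pseudo-C only):
 *
 *	if (!rack->rc_in_persist &&
 *	    tp->snd_wnd < min(high_rwnd / 2, pace_min_segs) &&
 *	    TCPS_HAVEESTABLISHED(tp->t_state) &&
 *	    tp->snd_max == tp->snd_una &&
 *	    sbavail(&so->so_snd) > tp->snd_wnd)
 *		enter persist;
 *
 * i.e. the offered window is too small to pace into, nothing is
 * in flight, and we still have queued data that does not fit.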
*/ 5622 if ((rack->rc_in_persist == 0) && 5623 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 5624 TCPS_HAVEESTABLISHED(tp->t_state) && 5625 (tp->snd_max == tp->snd_una) && 5626 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 5627 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 5628 /* 5629 * Here the rwnd is less than 5630 * the pacing size, we are established, 5631 * nothing is outstanding, and there is 5632 * data to send. Enter persists. 5633 */ 5634 tp->snd_nxt = tp->snd_una; 5635 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 5636 } 5637 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 5638 m_freem(m); 5639 return (0); 5640 } 5641 /* 5642 * Process segments with URG. 5643 */ 5644 if ((thflags & TH_URG) && th->th_urp && 5645 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 5646 /* 5647 * This is a kludge, but if we receive and accept random 5648 * urgent pointers, we'll crash in soreceive. It's hard to 5649 * imagine someone actually wanting to send this much urgent 5650 * data. 5651 */ 5652 SOCKBUF_LOCK(&so->so_rcv); 5653 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 5654 th->th_urp = 0; /* XXX */ 5655 thflags &= ~TH_URG; /* XXX */ 5656 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 5657 goto dodata; /* XXX */ 5658 } 5659 /* 5660 * If this segment advances the known urgent pointer, then 5661 * mark the data stream. This should not happen in 5662 * CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since a 5663 * FIN has been received from the remote side. In these 5664 * states we ignore the URG. 5665 * 5666 * According to RFC961 (Assigned Protocols), the urgent 5667 * pointer points to the last octet of urgent data. We 5668 * continue, however, to consider it to indicate the first 5669 * octet of data past the urgent section as the original 5670 * spec states (in one of two places). 5671 */ 5672 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { 5673 tp->rcv_up = th->th_seq + th->th_urp; 5674 so->so_oobmark = sbavail(&so->so_rcv) + 5675 (tp->rcv_up - tp->rcv_nxt) - 1; 5676 if (so->so_oobmark == 0) 5677 so->so_rcv.sb_state |= SBS_RCVATMARK; 5678 sohasoutofband(so); 5679 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 5680 } 5681 SOCKBUF_UNLOCK(&so->so_rcv); 5682 /* 5683 * Remove out of band data so doesn't get presented to user. 5684 * This can happen independent of advancing the URG pointer, 5685 * but if two URG's are pending at once, some out-of-band 5686 * data may creep in... ick. 5687 */ 5688 if (th->th_urp <= (uint32_t) tlen && 5689 !(so->so_options & SO_OOBINLINE)) { 5690 /* hdr drop is delayed */ 5691 tcp_pulloutofband(so, th, m, drop_hdrlen); 5692 } 5693 } else { 5694 /* 5695 * If no out of band data is expected, pull receive urgent 5696 * pointer along with the receive window. 5697 */ 5698 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 5699 tp->rcv_up = tp->rcv_nxt; 5700 } 5701 dodata: /* XXX */ 5702 INP_WLOCK_ASSERT(tp->t_inpcb); 5703 5704 /* 5705 * Process the segment text, merging it into the TCP sequencing 5706 * queue, and arranging for acknowledgment of receipt if necessary. 5707 * This process logically involves adjusting tp->rcv_wnd as data is 5708 * presented to the user (this happens in tcp_usrreq.c, case 5709 * PRU_RCVD). If a FIN has already been received on this connection 5710 * then we just ignore the text. 
5711 */ 5712 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 5713 IS_FASTOPEN(tp->t_flags)); 5714 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 5715 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 5716 tcp_seq save_start = th->th_seq; 5717 tcp_seq save_rnxt = tp->rcv_nxt; 5718 int save_tlen = tlen; 5719 5720 m_adj(m, drop_hdrlen); /* delayed header drop */ 5721 /* 5722 * Insert segment which includes th into TCP reassembly 5723 * queue with control block tp. Set thflags to whether 5724 * reassembly now includes a segment with FIN. This handles 5725 * the common case inline (segment is the next to be 5726 * received on an established connection, and the queue is 5727 * empty), avoiding linkage into and removal from the queue 5728 * and repetition of various conversions. Set DELACK for 5729 * segments received in order, but ack immediately when 5730 * segments are out of order (so fast retransmit can work). 5731 */ 5732 if (th->th_seq == tp->rcv_nxt && 5733 SEGQ_EMPTY(tp) && 5734 (TCPS_HAVEESTABLISHED(tp->t_state) || 5735 tfo_syn)) { 5736 #ifdef NETFLIX_SB_LIMITS 5737 u_int mcnt, appended; 5738 5739 if (so->so_rcv.sb_shlim) { 5740 mcnt = m_memcnt(m); 5741 appended = 0; 5742 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 5743 CFO_NOSLEEP, NULL) == false) { 5744 counter_u64_add(tcp_sb_shlim_fails, 1); 5745 m_freem(m); 5746 return (0); 5747 } 5748 } 5749 #endif 5750 if (DELAY_ACK(tp, tlen) || tfo_syn) { 5751 rack_timer_cancel(tp, rack, 5752 rack->r_ctl.rc_rcvtime, __LINE__); 5753 tp->t_flags |= TF_DELACK; 5754 } else { 5755 rack->r_wanted_output++; 5756 tp->t_flags |= TF_ACKNOW; 5757 } 5758 tp->rcv_nxt += tlen; 5759 thflags = th->th_flags & TH_FIN; 5760 TCPSTAT_ADD(tcps_rcvpack, nsegs); 5761 TCPSTAT_ADD(tcps_rcvbyte, tlen); 5762 SOCKBUF_LOCK(&so->so_rcv); 5763 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5764 m_freem(m); 5765 } else 5766 #ifdef NETFLIX_SB_LIMITS 5767 appended = 5768 #endif 5769 sbappendstream_locked(&so->so_rcv, m, 0); 5770 /* NB: sorwakeup_locked() does an implicit unlock. */ 5771 sorwakeup_locked(so); 5772 #ifdef NETFLIX_SB_LIMITS 5773 if (so->so_rcv.sb_shlim && appended != mcnt) 5774 counter_fo_release(so->so_rcv.sb_shlim, 5775 mcnt - appended); 5776 #endif 5777 } else { 5778 /* 5779 * XXX: Due to the header drop above "th" is 5780 * theoretically invalid by now. Fortunately 5781 * m_adj() doesn't actually frees any mbufs when 5782 * trimming from the head. 5783 */ 5784 tcp_seq temp = save_start; 5785 thflags = tcp_reass(tp, th, &temp, &tlen, m); 5786 tp->t_flags |= TF_ACKNOW; 5787 } 5788 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) { 5789 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 5790 /* 5791 * DSACK actually handled in the fastpath 5792 * above. 5793 */ 5794 tcp_update_sack_list(tp, save_start, 5795 save_start + save_tlen); 5796 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 5797 if ((tp->rcv_numsacks >= 1) && 5798 (tp->sackblks[0].end == save_start)) { 5799 /* 5800 * Partial overlap, recorded at todrop 5801 * above. 5802 */ 5803 tcp_update_sack_list(tp, 5804 tp->sackblks[0].start, 5805 tp->sackblks[0].end); 5806 } else { 5807 tcp_update_dsack_list(tp, save_start, 5808 save_start + save_tlen); 5809 } 5810 } else if (tlen >= save_tlen) { 5811 /* Update of sackblks. 
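 * These branches implement RFC 2883 D-SACK reporting: when some
 * or all of the segment turned out to be data we already had, the
 * duplicate range is advertised back so the sender can recognize
 * a spurious retransmission. For example, if rcv_nxt is already
 * 2000 and the peer retransmits 1000:1500 in full, the next ACK
 * carries a SACK block for 1000-1500 even though that range lies
 * below the cumulative ACK, which is the D-SACK signal.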
*/ 5812 tcp_update_dsack_list(tp, save_start, 5813 save_start + save_tlen); 5814 } else if (tlen > 0) { 5815 tcp_update_dsack_list(tp, save_start, 5816 save_start + tlen); 5817 } 5818 } 5819 } else { 5820 m_freem(m); 5821 thflags &= ~TH_FIN; 5822 } 5823 5824 /* 5825 * If FIN is received ACK the FIN and let the user know that the 5826 * connection is closing. 5827 */ 5828 if (thflags & TH_FIN) { 5829 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 5830 socantrcvmore(so); 5831 /* 5832 * If connection is half-synchronized (ie NEEDSYN 5833 * flag on) then delay ACK, so it may be piggybacked 5834 * when SYN is sent. Otherwise, since we received a 5835 * FIN then no more input can be expected, send ACK 5836 * now. 5837 */ 5838 if (tp->t_flags & TF_NEEDSYN) { 5839 rack_timer_cancel(tp, rack, 5840 rack->r_ctl.rc_rcvtime, __LINE__); 5841 tp->t_flags |= TF_DELACK; 5842 } else { 5843 tp->t_flags |= TF_ACKNOW; 5844 } 5845 tp->rcv_nxt++; 5846 } 5847 switch (tp->t_state) { 5848 5849 /* 5850 * In SYN_RECEIVED and ESTABLISHED STATES enter the 5851 * CLOSE_WAIT state. 5852 */ 5853 case TCPS_SYN_RECEIVED: 5854 tp->t_starttime = ticks; 5855 /* FALLTHROUGH */ 5856 case TCPS_ESTABLISHED: 5857 rack_timer_cancel(tp, rack, 5858 rack->r_ctl.rc_rcvtime, __LINE__); 5859 tcp_state_change(tp, TCPS_CLOSE_WAIT); 5860 break; 5861 5862 /* 5863 * If still in FIN_WAIT_1 STATE FIN has not been 5864 * acked so enter the CLOSING state. 5865 */ 5866 case TCPS_FIN_WAIT_1: 5867 rack_timer_cancel(tp, rack, 5868 rack->r_ctl.rc_rcvtime, __LINE__); 5869 tcp_state_change(tp, TCPS_CLOSING); 5870 break; 5871 5872 /* 5873 * In FIN_WAIT_2 state enter the TIME_WAIT state, 5874 * starting the time-wait timer, turning off the 5875 * other standard timers. 5876 */ 5877 case TCPS_FIN_WAIT_2: 5878 rack_timer_cancel(tp, rack, 5879 rack->r_ctl.rc_rcvtime, __LINE__); 5880 tcp_twstart(tp); 5881 return (1); 5882 } 5883 } 5884 /* 5885 * Return any desired output. 5886 */ 5887 if ((tp->t_flags & TF_ACKNOW) || 5888 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 5889 rack->r_wanted_output++; 5890 } 5891 INP_WLOCK_ASSERT(tp->t_inpcb); 5892 return (0); 5893 } 5894 5895 /* 5896 * Here nothing is really faster, its just that we 5897 * have broken out the fast-data path also just like 5898 * the fast-ack. 5899 */ 5900 static int 5901 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 5902 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5903 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 5904 { 5905 int32_t nsegs; 5906 int32_t newsize = 0; /* automatic sockbuf scaling */ 5907 struct tcp_rack *rack; 5908 #ifdef NETFLIX_SB_LIMITS 5909 u_int mcnt, appended; 5910 #endif 5911 #ifdef TCPDEBUG 5912 /* 5913 * The size of tcp_saveipgen must be the size of the max ip header, 5914 * now IPv6. 5915 */ 5916 u_char tcp_saveipgen[IP6_HDR_LEN]; 5917 struct tcphdr tcp_savetcp; 5918 short ostate = 0; 5919 5920 #endif 5921 /* 5922 * If last ACK falls within this segment's sequence numbers, record 5923 * the timestamp. NOTE that the test is modified according to the 5924 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
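 *
 * Before that, the chain of __predict_false() checks below bails
 * out to the slow path (return 0) unless all of the following
 * hold: the segment is the next expected one (th_seq == rcv_nxt),
 * we are not retransmitting (snd_nxt == snd_max), the advertised
 * window did not change, no SYN or FIN is pending (TF_NEEDSYN,
 * TF_NEEDFIN), the timestamp passes PAWS, the ACK carries nothing
 * new (th_ack == snd_una) and the data fits in the receive
 * buffer. Only then do we append straight to so_rcv.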
5925 */ 5926 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 5927 return (0); 5928 } 5929 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 5930 return (0); 5931 } 5932 if (tiwin && tiwin != tp->snd_wnd) { 5933 return (0); 5934 } 5935 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 5936 return (0); 5937 } 5938 if (__predict_false((to->to_flags & TOF_TS) && 5939 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 5940 return (0); 5941 } 5942 if (__predict_false((th->th_ack != tp->snd_una))) { 5943 return (0); 5944 } 5945 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 5946 return (0); 5947 } 5948 if ((to->to_flags & TOF_TS) != 0 && 5949 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 5950 tp->ts_recent_age = tcp_ts_getticks(); 5951 tp->ts_recent = to->to_tsval; 5952 } 5953 rack = (struct tcp_rack *)tp->t_fb_ptr; 5954 /* 5955 * This is a pure, in-sequence data packet with nothing on the 5956 * reassembly queue and we have enough buffer space to take it. 5957 */ 5958 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5959 5960 #ifdef NETFLIX_SB_LIMITS 5961 if (so->so_rcv.sb_shlim) { 5962 mcnt = m_memcnt(m); 5963 appended = 0; 5964 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 5965 CFO_NOSLEEP, NULL) == false) { 5966 counter_u64_add(tcp_sb_shlim_fails, 1); 5967 m_freem(m); 5968 return (1); 5969 } 5970 } 5971 #endif 5972 /* Clean receiver SACK report if present */ 5973 if (tp->rcv_numsacks) 5974 tcp_clean_sackreport(tp); 5975 TCPSTAT_INC(tcps_preddat); 5976 tp->rcv_nxt += tlen; 5977 /* 5978 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 5979 */ 5980 tp->snd_wl1 = th->th_seq; 5981 /* 5982 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 5983 */ 5984 tp->rcv_up = tp->rcv_nxt; 5985 TCPSTAT_ADD(tcps_rcvpack, nsegs); 5986 TCPSTAT_ADD(tcps_rcvbyte, tlen); 5987 #ifdef TCPDEBUG 5988 if (so->so_options & SO_DEBUG) 5989 tcp_trace(TA_INPUT, ostate, tp, 5990 (void *)tcp_saveipgen, &tcp_savetcp, 0); 5991 #endif 5992 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 5993 5994 /* Add data to socket buffer. */ 5995 SOCKBUF_LOCK(&so->so_rcv); 5996 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5997 m_freem(m); 5998 } else { 5999 /* 6000 * Set new socket buffer size. Give up when limit is 6001 * reached. 6002 */ 6003 if (newsize) 6004 if (!sbreserve_locked(&so->so_rcv, 6005 newsize, so, NULL)) 6006 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 6007 m_adj(m, drop_hdrlen); /* delayed header drop */ 6008 #ifdef NETFLIX_SB_LIMITS 6009 appended = 6010 #endif 6011 sbappendstream_locked(&so->so_rcv, m, 0); 6012 ctf_calc_rwin(so, tp); 6013 } 6014 /* NB: sorwakeup_locked() does an implicit unlock. */ 6015 sorwakeup_locked(so); 6016 #ifdef NETFLIX_SB_LIMITS 6017 if (so->so_rcv.sb_shlim && mcnt != appended) 6018 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 6019 #endif 6020 if (DELAY_ACK(tp, tlen)) { 6021 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 6022 tp->t_flags |= TF_DELACK; 6023 } else { 6024 tp->t_flags |= TF_ACKNOW; 6025 rack->r_wanted_output++; 6026 } 6027 if ((tp->snd_una == tp->snd_max) && rack_use_sack_filter) 6028 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6029 return (1); 6030 } 6031 6032 /* 6033 * This subfunction is used to try to highly optimize the 6034 * fast path. We again allow window updates that are 6035 * in sequence to remain in the fast-path. We also add 6036 * in the __predict's to attempt to help the compiler. 6037 * Note that if we return a 0, then we can *not* process 6038 * it and the caller should push the packet into the 6039 * slow-path. 
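 *
 * The pure-ACK fast path below disqualifies itself, and returns 0
 * for slow-path handling, on any of: an old or out-of-range ACK,
 * an ongoing retransmission (snd_nxt != snd_max), a zero window,
 * a pending SYN or FIN, a timestamp older than ts_recent, being
 * in recovery, or having SACKed data on the scoreboard. What is
 * left is the common case of an in-window cumulative ACK with no
 * loss signals, which can be handled without touching the
 * reassembly or SACK machinery.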
6040 */ 6041 static int 6042 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 6043 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6044 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts, uint8_t iptos) 6045 { 6046 int32_t acked; 6047 int32_t nsegs; 6048 6049 #ifdef TCPDEBUG 6050 /* 6051 * The size of tcp_saveipgen must be the size of the max ip header, 6052 * now IPv6. 6053 */ 6054 u_char tcp_saveipgen[IP6_HDR_LEN]; 6055 struct tcphdr tcp_savetcp; 6056 short ostate = 0; 6057 6058 #endif 6059 struct tcp_rack *rack; 6060 6061 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 6062 /* Old ack, behind (or duplicate to) the last one rcv'd */ 6063 return (0); 6064 } 6065 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 6066 /* Above what we have sent? */ 6067 return (0); 6068 } 6069 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 6070 /* We are retransmitting */ 6071 return (0); 6072 } 6073 if (__predict_false(tiwin == 0)) { 6074 /* zero window */ 6075 return (0); 6076 } 6077 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 6078 /* We need a SYN or a FIN, unlikely.. */ 6079 return (0); 6080 } 6081 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 6082 /* Timestamp is behind .. old ack with seq wrap? */ 6083 return (0); 6084 } 6085 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 6086 /* Still recovering */ 6087 return (0); 6088 } 6089 rack = (struct tcp_rack *)tp->t_fb_ptr; 6090 if (rack->r_ctl.rc_sacked) { 6091 /* We have sack holes on our scoreboard */ 6092 return (0); 6093 } 6094 /* Ok if we reach here, we can process a fast-ack */ 6095 nsegs = max(1, m->m_pkthdr.lro_nsegs); 6096 rack_log_ack(tp, to, th); 6097 /* 6098 * We made progress, clear the tlp 6099 * out flag so we could start a TLP 6100 * again. 6101 */ 6102 rack->r_ctl.rc_tlp_rtx_out = 0; 6103 /* Did the window get updated? */ 6104 if (tiwin != tp->snd_wnd) { 6105 tp->snd_wnd = tiwin; 6106 tp->snd_wl1 = th->th_seq; 6107 if (tp->snd_wnd > tp->max_sndwnd) 6108 tp->max_sndwnd = tp->snd_wnd; 6109 } 6110 /* Do we exit persists? */ 6111 if ((rack->rc_in_persist != 0) && 6112 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 6113 rack->r_ctl.rc_pace_min_segs))) { 6114 rack_exit_persist(tp, rack); 6115 } 6116 /* Do we enter persists? */ 6117 if ((rack->rc_in_persist == 0) && 6118 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 6119 TCPS_HAVEESTABLISHED(tp->t_state) && 6120 (tp->snd_max == tp->snd_una) && 6121 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 6122 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 6123 /* 6124 * Here the rwnd is less than 6125 * the pacing size, we are established, 6126 * nothing is outstanding, and there is 6127 * data to send. Enter persists. 6128 */ 6129 tp->snd_nxt = tp->snd_una; 6130 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 6131 } 6132 /* 6133 * If last ACK falls within this segment's sequence numbers, record 6134 * the timestamp. NOTE that the test is modified according to the 6135 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 6136 */ 6137 if ((to->to_flags & TOF_TS) != 0 && 6138 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 6139 tp->ts_recent_age = tcp_ts_getticks(); 6140 tp->ts_recent = to->to_tsval; 6141 } 6142 /* 6143 * This is a pure ack for outstanding data. 6144 */ 6145 TCPSTAT_INC(tcps_predack); 6146 6147 /* 6148 * "bad retransmit" recovery. 
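 *
 * As in the slow path: if our very first RTO retransmission
 * (t_rxtshift == 1) is acknowledged while ticks is still inside
 * the window recorded in t_badrxtwin, the retransmission is
 * judged spurious and CC_RTO_ERR tells the congestion control
 * code to undo the RTO reaction, restoring the prior cwnd and
 * ssthresh.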
6149 */ 6150 if (tp->t_flags & TF_PREVVALID) { 6151 tp->t_flags &= ~TF_PREVVALID; 6152 if (tp->t_rxtshift == 1 && 6153 (int)(ticks - tp->t_badrxtwin) < 0) 6154 rack_cong_signal(tp, th, CC_RTO_ERR); 6155 } 6156 /* 6157 * Recalculate the transmit timer / rtt. 6158 * 6159 * Some boxes send broken timestamp replies during the SYN+ACK 6160 * phase, ignore timestamps of 0 or we could calculate a huge RTT 6161 * and blow up the retransmit timer. 6162 */ 6163 acked = BYTES_THIS_ACK(tp, th); 6164 6165 #ifdef TCP_HHOOK 6166 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 6167 hhook_run_tcp_est_in(tp, th, to); 6168 #endif 6169 6170 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 6171 TCPSTAT_ADD(tcps_rcvackbyte, acked); 6172 sbdrop(&so->so_snd, acked); 6173 /* 6174 * Let the congestion control algorithm update congestion control 6175 * related information. This typically means increasing the 6176 * congestion window. 6177 */ 6178 rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0); 6179 6180 tp->snd_una = th->th_ack; 6181 if (tp->snd_wnd < ctf_outstanding(tp)) { 6182 /* The peer collapsed the window */ 6183 rack_collapsed_window(rack); 6184 } else if (rack->rc_has_collapsed) 6185 rack_un_collapse_window(rack); 6186 6187 /* 6188 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 6189 */ 6190 tp->snd_wl2 = th->th_ack; 6191 tp->t_dupacks = 0; 6192 m_freem(m); 6193 /* ND6_HINT(tp); *//* Some progress has been made. */ 6194 6195 /* 6196 * If all outstanding data are acked, stop retransmit timer, 6197 * otherwise restart timer using current (possibly backed-off) 6198 * value. If process is waiting for space, wakeup/selwakeup/signal. 6199 * If data are ready to send, let tcp_output decide between more 6200 * output or persist. 6201 */ 6202 #ifdef TCPDEBUG 6203 if (so->so_options & SO_DEBUG) 6204 tcp_trace(TA_INPUT, ostate, tp, 6205 (void *)tcp_saveipgen, 6206 &tcp_savetcp, 0); 6207 #endif 6208 if (tp->snd_una == tp->snd_max) { 6209 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 6210 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 6211 tp->t_acktime = 0; 6212 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 6213 } 6214 /* Wake up the socket if we have room to write more */ 6215 sowwakeup(so); 6216 if (sbavail(&so->so_snd)) { 6217 rack->r_wanted_output++; 6218 } 6219 return (1); 6220 } 6221 6222 /* 6223 * Return value of 1, the TCB is unlocked and most 6224 * likely gone, return value of 0, the TCP is still 6225 * locked. 6226 */ 6227 static int 6228 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 6229 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6230 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t tos) 6231 { 6232 int32_t ret_val = 0; 6233 int32_t todrop; 6234 int32_t ourfinisacked = 0; 6235 struct tcp_rack *rack; 6236 6237 ctf_calc_rwin(so, tp); 6238 /* 6239 * If the state is SYN_SENT: if seg contains an ACK, but not for our 6240 * SYN, drop the input. if seg contains a RST, then drop the 6241 * connection. if seg does not contain SYN, then drop it. Otherwise 6242 * this is an acceptable SYN segment initialize tp->rcv_nxt and 6243 * tp->irs if seg contains ack then advance tp->snd_una if seg 6244 * contains an ECE and ECN support is enabled, the stream is ECN 6245 * capable. 
if SYN has been acked change to ESTABLISHED else 6246 * SYN_RCVD state arrange for segment to be acked (eventually) 6247 * continue processing rest of data/controls, beginning with URG 6248 */ 6249 if ((thflags & TH_ACK) && 6250 (SEQ_LEQ(th->th_ack, tp->iss) || 6251 SEQ_GT(th->th_ack, tp->snd_max))) { 6252 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6253 return (1); 6254 } 6255 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 6256 TCP_PROBE5(connect__refused, NULL, tp, 6257 mtod(m, const char *), tp, th); 6258 tp = tcp_drop(tp, ECONNREFUSED); 6259 ctf_do_drop(m, tp); 6260 return (1); 6261 } 6262 if (thflags & TH_RST) { 6263 ctf_do_drop(m, tp); 6264 return (1); 6265 } 6266 if (!(thflags & TH_SYN)) { 6267 ctf_do_drop(m, tp); 6268 return (1); 6269 } 6270 tp->irs = th->th_seq; 6271 tcp_rcvseqinit(tp); 6272 rack = (struct tcp_rack *)tp->t_fb_ptr; 6273 if (thflags & TH_ACK) { 6274 int tfo_partial = 0; 6275 6276 TCPSTAT_INC(tcps_connects); 6277 soisconnected(so); 6278 #ifdef MAC 6279 mac_socketpeer_set_from_mbuf(m, so); 6280 #endif 6281 /* Do window scaling on this connection? */ 6282 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 6283 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 6284 tp->rcv_scale = tp->request_r_scale; 6285 } 6286 tp->rcv_adv += min(tp->rcv_wnd, 6287 TCP_MAXWIN << tp->rcv_scale); 6288 /* 6289 * If not all the data that was sent in the TFO SYN 6290 * has been acked, resend the remainder right away. 6291 */ 6292 if (IS_FASTOPEN(tp->t_flags) && 6293 (tp->snd_una != tp->snd_max)) { 6294 tp->snd_nxt = th->th_ack; 6295 tfo_partial = 1; 6296 } 6297 /* 6298 * If there's data, delay ACK; if there's also a FIN ACKNOW 6299 * will be turned on later. 6300 */ 6301 if (DELAY_ACK(tp, tlen) && tlen != 0 && (tfo_partial == 0)) { 6302 rack_timer_cancel(tp, rack, 6303 rack->r_ctl.rc_rcvtime, __LINE__); 6304 tp->t_flags |= TF_DELACK; 6305 } else { 6306 rack->r_wanted_output++; 6307 tp->t_flags |= TF_ACKNOW; 6308 } 6309 6310 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 6311 V_tcp_do_ecn) { 6312 tp->t_flags2 |= TF2_ECN_PERMIT; 6313 TCPSTAT_INC(tcps_ecn_shs); 6314 } 6315 if (SEQ_GT(th->th_ack, tp->snd_una)) { 6316 /* 6317 * We advance snd_una for the 6318 * fast open case. If th_ack is 6319 * acknowledging data beyond 6320 * snd_una we can't just call 6321 * ack-processing since the 6322 * data stream in our send-map 6323 * will start at snd_una + 1 (one 6324 * beyond the SYN). If its just 6325 * equal we don't need to do that 6326 * and there is no send_map. 6327 */ 6328 tp->snd_una++; 6329 } 6330 /* 6331 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 6332 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 6333 */ 6334 tp->t_starttime = ticks; 6335 if (tp->t_flags & TF_NEEDFIN) { 6336 tcp_state_change(tp, TCPS_FIN_WAIT_1); 6337 tp->t_flags &= ~TF_NEEDFIN; 6338 thflags &= ~TH_SYN; 6339 } else { 6340 tcp_state_change(tp, TCPS_ESTABLISHED); 6341 TCP_PROBE5(connect__established, NULL, tp, 6342 mtod(m, const char *), tp, th); 6343 cc_conn_init(tp); 6344 } 6345 } else { 6346 /* 6347 * Received initial SYN in SYN-SENT[*] state => simultaneous 6348 * open. If segment contains CC option and there is a 6349 * cached CC, apply TAO test. If it succeeds, connection is * 6350 * half-synchronized. Otherwise, do 3-way handshake: 6351 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 6352 * there was no CC option, clear cached CC value. 
6353 */ 6354 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 6355 tcp_state_change(tp, TCPS_SYN_RECEIVED); 6356 } 6357 INP_WLOCK_ASSERT(tp->t_inpcb); 6358 /* 6359 * Advance th->th_seq to correspond to first data byte. If data, 6360 * trim to stay within window, dropping FIN if necessary. 6361 */ 6362 th->th_seq++; 6363 if (tlen > tp->rcv_wnd) { 6364 todrop = tlen - tp->rcv_wnd; 6365 m_adj(m, -todrop); 6366 tlen = tp->rcv_wnd; 6367 thflags &= ~TH_FIN; 6368 TCPSTAT_INC(tcps_rcvpackafterwin); 6369 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 6370 } 6371 tp->snd_wl1 = th->th_seq - 1; 6372 tp->rcv_up = th->th_seq; 6373 /* 6374 * Client side of transaction: already sent SYN and data. If the 6375 * remote host used T/TCP to validate the SYN, our data will be 6376 * ACK'd; if so, enter normal data segment processing in the middle 6377 * of step 5, ack processing. Otherwise, goto step 6. 6378 */ 6379 if (thflags & TH_ACK) { 6380 /* For syn-sent we need to possibly update the rtt */ 6381 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 6382 uint32_t t; 6383 6384 t = tcp_ts_getticks() - to->to_tsecr; 6385 if (!tp->t_rttlow || tp->t_rttlow > t) 6386 tp->t_rttlow = t; 6387 tcp_rack_xmit_timer(rack, t + 1); 6388 tcp_rack_xmit_timer_commit(rack, tp); 6389 } 6390 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 6391 return (ret_val); 6392 /* We may have changed to FIN_WAIT_1 above */ 6393 if (tp->t_state == TCPS_FIN_WAIT_1) { 6394 /* 6395 * In FIN_WAIT_1 STATE in addition to the processing 6396 * for the ESTABLISHED state if our FIN is now 6397 * acknowledged then enter FIN_WAIT_2. 6398 */ 6399 if (ourfinisacked) { 6400 /* 6401 * If we can't receive any more data, then 6402 * closing user can proceed. Starting the 6403 * timer is contrary to the specification, 6404 * but if we don't get a FIN we'll hang 6405 * forever. 6406 * 6407 * XXXjl: we should release the tp also, and 6408 * use a compressed state. 6409 */ 6410 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6411 soisdisconnected(so); 6412 tcp_timer_activate(tp, TT_2MSL, 6413 (tcp_fast_finwait2_recycle ? 6414 tcp_finwait2_timeout : 6415 TP_MAXIDLE(tp))); 6416 } 6417 tcp_state_change(tp, TCPS_FIN_WAIT_2); 6418 } 6419 } 6420 } 6421 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6422 tiwin, thflags, nxt_pkt)); 6423 } 6424 6425 /* 6426 * Return value of 1, the TCB is unlocked and most 6427 * likely gone, return value of 0, the TCP is still 6428 * locked. 6429 */ 6430 static int 6431 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 6432 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6433 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 6434 { 6435 struct tcp_rack *rack; 6436 int32_t ret_val = 0; 6437 int32_t ourfinisacked = 0; 6438 6439 ctf_calc_rwin(so, tp); 6440 if ((thflags & TH_ACK) && 6441 (SEQ_LEQ(th->th_ack, tp->snd_una) || 6442 SEQ_GT(th->th_ack, tp->snd_max))) { 6443 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6444 return (1); 6445 } 6446 rack = (struct tcp_rack *)tp->t_fb_ptr; 6447 if (IS_FASTOPEN(tp->t_flags)) { 6448 /* 6449 * When a TFO connection is in SYN_RECEIVED, the 6450 * only valid packets are the initial SYN, a 6451 * retransmit/copy of the initial SYN (possibly with 6452 * a subset of the original data), a valid ACK, a 6453 * FIN, or a RST. 
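 * Concretely, the checks that follow reset the connection on a
 * SYN|ACK (a TFO server in SYN_RECEIVED should never see one),
 * drop a retransmitted bare SYN while a retransmit, TLP or RACK
 * timer is already pending, and drop anything that carries none
 * of ACK, FIN or RST.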
6454 */ 6455 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 6456 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6457 return (1); 6458 } else if (thflags & TH_SYN) { 6459 /* non-initial SYN is ignored */ 6460 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 6461 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 6462 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 6463 ctf_do_drop(m, NULL); 6464 return (0); 6465 } 6466 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 6467 ctf_do_drop(m, NULL); 6468 return (0); 6469 } 6470 } 6471 if ((thflags & TH_RST) || 6472 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6473 return (ctf_process_rst(m, th, so, tp)); 6474 /* 6475 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6476 * it's less than ts_recent, drop it. 6477 */ 6478 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6479 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6480 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6481 return (ret_val); 6482 } 6483 /* 6484 * In the SYN-RECEIVED state, validate that the packet belongs to 6485 * this connection before trimming the data to fit the receive 6486 * window. Check the sequence number versus IRS since we know the 6487 * sequence numbers haven't wrapped. This is a partial fix for the 6488 * "LAND" DoS attack. 6489 */ 6490 if (SEQ_LT(th->th_seq, tp->irs)) { 6491 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6492 return (1); 6493 } 6494 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6495 return (ret_val); 6496 } 6497 /* 6498 * If last ACK falls within this segment's sequence numbers, record 6499 * its timestamp. NOTE: 1) That the test incorporates suggestions 6500 * from the latest proposal of the tcplw@cray.com list (Braden 6501 * 1993/04/26). 2) That updating only on newer timestamps interferes 6502 * with our earlier PAWS tests, so this check should be solely 6503 * predicated on the sequence space of this segment. 3) That we 6504 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6505 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6506 * SEG.Len, This modified check allows us to overcome RFC1323's 6507 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6508 * p.869. In such cases, we can still calculate the RTT correctly 6509 * when RCV.NXT == Last.ACK.Sent. 6510 */ 6511 if ((to->to_flags & TOF_TS) != 0 && 6512 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6513 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6514 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6515 tp->ts_recent_age = tcp_ts_getticks(); 6516 tp->ts_recent = to->to_tsval; 6517 } 6518 tp->snd_wnd = tiwin; 6519 /* 6520 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6521 * is on (half-synchronized state), then queue data for later 6522 * processing; else drop segment and return. 6523 */ 6524 if ((thflags & TH_ACK) == 0) { 6525 if (IS_FASTOPEN(tp->t_flags)) { 6526 cc_conn_init(tp); 6527 } 6528 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6529 tiwin, thflags, nxt_pkt)); 6530 } 6531 TCPSTAT_INC(tcps_connects); 6532 soisconnected(so); 6533 /* Do window scaling? 
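 * Window scaling only takes effect once the handshake completes:
 * from here on our requested shift (request_r_scale) is applied
 * to the windows we advertise, while the peer's advertised
 * windows have already been expanded into tiwin by the input
 * path. With a scale of 7, for example, a raw 16-bit window
 * field of 500 represents 500 << 7 = 64000 bytes.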
*/ 6534 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 6535 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 6536 tp->rcv_scale = tp->request_r_scale; 6537 } 6538 /* 6539 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 6540 * FIN-WAIT-1 6541 */ 6542 tp->t_starttime = ticks; 6543 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 6544 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 6545 tp->t_tfo_pending = NULL; 6546 6547 /* 6548 * Account for the ACK of our SYN prior to 6549 * regular ACK processing below. 6550 */ 6551 tp->snd_una++; 6552 } 6553 if (tp->t_flags & TF_NEEDFIN) { 6554 tcp_state_change(tp, TCPS_FIN_WAIT_1); 6555 tp->t_flags &= ~TF_NEEDFIN; 6556 } else { 6557 tcp_state_change(tp, TCPS_ESTABLISHED); 6558 TCP_PROBE5(accept__established, NULL, tp, 6559 mtod(m, const char *), tp, th); 6560 /* 6561 * TFO connections call cc_conn_init() during SYN 6562 * processing. Calling it again here for such connections 6563 * is not harmless as it would undo the snd_cwnd reduction 6564 * that occurs when a TFO SYN|ACK is retransmitted. 6565 */ 6566 if (!IS_FASTOPEN(tp->t_flags)) 6567 cc_conn_init(tp); 6568 } 6569 /* 6570 * If segment contains data or ACK, will call tcp_reass() later; if 6571 * not, do so now to pass queued data to user. 6572 */ 6573 if (tlen == 0 && (thflags & TH_FIN) == 0) 6574 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 6575 (struct mbuf *)0); 6576 tp->snd_wl1 = th->th_seq - 1; 6577 /* For syn-recv we need to possibly update the rtt */ 6578 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 6579 uint32_t t; 6580 6581 t = tcp_ts_getticks() - to->to_tsecr; 6582 if (!tp->t_rttlow || tp->t_rttlow > t) 6583 tp->t_rttlow = t; 6584 tcp_rack_xmit_timer(rack, t + 1); 6585 tcp_rack_xmit_timer_commit(rack, tp); 6586 } 6587 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6588 return (ret_val); 6589 } 6590 if (tp->t_state == TCPS_FIN_WAIT_1) { 6591 /* We could have went to FIN_WAIT_1 (or EST) above */ 6592 /* 6593 * In FIN_WAIT_1 STATE in addition to the processing for the 6594 * ESTABLISHED state if our FIN is now acknowledged then 6595 * enter FIN_WAIT_2. 6596 */ 6597 if (ourfinisacked) { 6598 /* 6599 * If we can't receive any more data, then closing 6600 * user can proceed. Starting the timer is contrary 6601 * to the specification, but if we don't get a FIN 6602 * we'll hang forever. 6603 * 6604 * XXXjl: we should release the tp also, and use a 6605 * compressed state. 6606 */ 6607 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6608 soisdisconnected(so); 6609 tcp_timer_activate(tp, TT_2MSL, 6610 (tcp_fast_finwait2_recycle ? 6611 tcp_finwait2_timeout : 6612 TP_MAXIDLE(tp))); 6613 } 6614 tcp_state_change(tp, TCPS_FIN_WAIT_2); 6615 } 6616 } 6617 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6618 tiwin, thflags, nxt_pkt)); 6619 } 6620 6621 /* 6622 * Return value of 1, the TCB is unlocked and most 6623 * likely gone, return value of 0, the TCP is still 6624 * locked. 6625 */ 6626 static int 6627 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 6628 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6629 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 6630 { 6631 int32_t ret_val = 0; 6632 6633 /* 6634 * Header prediction: check for the two common cases of a 6635 * uni-directional data xfer. If the packet has no control flags, 6636 * is in-sequence, the window didn't change and we're not 6637 * retransmitting, it's a candidate. 
If the length is zero and the 6638 * ack moved forward, we're the sender side of the xfer. Just free 6639 * the data acked & wake any higher level process that was blocked 6640 * waiting for space. If the length is non-zero and the ack didn't 6641 * move, we're the receiver side. If we're getting packets in-order 6642 * (the reassembly queue is empty), add the data toc The socket 6643 * buffer and note that we need a delayed ack. Make sure that the 6644 * hidden state-flags are also off. Since we check for 6645 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 6646 */ 6647 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 6648 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) && 6649 __predict_true(SEGQ_EMPTY(tp)) && 6650 __predict_true(th->th_seq == tp->rcv_nxt)) { 6651 struct tcp_rack *rack; 6652 6653 rack = (struct tcp_rack *)tp->t_fb_ptr; 6654 if (tlen == 0) { 6655 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 6656 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime, iptos)) { 6657 return (0); 6658 } 6659 } else { 6660 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 6661 tiwin, nxt_pkt, iptos)) { 6662 return (0); 6663 } 6664 } 6665 } 6666 ctf_calc_rwin(so, tp); 6667 6668 if ((thflags & TH_RST) || 6669 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6670 return (ctf_process_rst(m, th, so, tp)); 6671 6672 /* 6673 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6674 * synchronized state. 6675 */ 6676 if (thflags & TH_SYN) { 6677 ctf_challenge_ack(m, th, tp, &ret_val); 6678 return (ret_val); 6679 } 6680 /* 6681 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6682 * it's less than ts_recent, drop it. 6683 */ 6684 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6685 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6686 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6687 return (ret_val); 6688 } 6689 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6690 return (ret_val); 6691 } 6692 /* 6693 * If last ACK falls within this segment's sequence numbers, record 6694 * its timestamp. NOTE: 1) That the test incorporates suggestions 6695 * from the latest proposal of the tcplw@cray.com list (Braden 6696 * 1993/04/26). 2) That updating only on newer timestamps interferes 6697 * with our earlier PAWS tests, so this check should be solely 6698 * predicated on the sequence space of this segment. 3) That we 6699 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6700 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6701 * SEG.Len, This modified check allows us to overcome RFC1323's 6702 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6703 * p.869. In such cases, we can still calculate the RTT correctly 6704 * when RCV.NXT == Last.ACK.Sent. 6705 */ 6706 if ((to->to_flags & TOF_TS) != 0 && 6707 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6708 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6709 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6710 tp->ts_recent_age = tcp_ts_getticks(); 6711 tp->ts_recent = to->to_tsval; 6712 } 6713 /* 6714 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6715 * is on (half-synchronized state), then queue data for later 6716 * processing; else drop segment and return. 
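 *
 * In practice only a half-synchronized connection (TF_NEEDSYN)
 * gets its ACK-less data queued; otherwise we either drop after
 * ACKing (TF_ACKNOW set) or drop silently. After normal ACK
 * processing we also run rack_progress_timeout_check() when send
 * data is still queued, which resets connections that have made
 * no forward progress within the allowed time.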
6717 */ 6718 if ((thflags & TH_ACK) == 0) { 6719 if (tp->t_flags & TF_NEEDSYN) { 6720 6721 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6722 tiwin, thflags, nxt_pkt)); 6723 6724 } else if (tp->t_flags & TF_ACKNOW) { 6725 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6726 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 6727 return (ret_val); 6728 } else { 6729 ctf_do_drop(m, NULL); 6730 return (0); 6731 } 6732 } 6733 /* 6734 * Ack processing. 6735 */ 6736 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 6737 return (ret_val); 6738 } 6739 if (sbavail(&so->so_snd)) { 6740 if (rack_progress_timeout_check(tp)) { 6741 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6742 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6743 return (1); 6744 } 6745 } 6746 /* State changes only happen in rack_process_data() */ 6747 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6748 tiwin, thflags, nxt_pkt)); 6749 } 6750 6751 /* 6752 * Return value of 1, the TCB is unlocked and most 6753 * likely gone, return value of 0, the TCP is still 6754 * locked. 6755 */ 6756 static int 6757 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 6758 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6759 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 6760 { 6761 int32_t ret_val = 0; 6762 6763 ctf_calc_rwin(so, tp); 6764 if ((thflags & TH_RST) || 6765 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6766 return (ctf_process_rst(m, th, so, tp)); 6767 /* 6768 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6769 * synchronized state. 6770 */ 6771 if (thflags & TH_SYN) { 6772 ctf_challenge_ack(m, th, tp, &ret_val); 6773 return (ret_val); 6774 } 6775 /* 6776 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6777 * it's less than ts_recent, drop it. 6778 */ 6779 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6780 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6781 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6782 return (ret_val); 6783 } 6784 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6785 return (ret_val); 6786 } 6787 /* 6788 * If last ACK falls within this segment's sequence numbers, record 6789 * its timestamp. NOTE: 1) That the test incorporates suggestions 6790 * from the latest proposal of the tcplw@cray.com list (Braden 6791 * 1993/04/26). 2) That updating only on newer timestamps interferes 6792 * with our earlier PAWS tests, so this check should be solely 6793 * predicated on the sequence space of this segment. 3) That we 6794 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6795 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6796 * SEG.Len, This modified check allows us to overcome RFC1323's 6797 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6798 * p.869. In such cases, we can still calculate the RTT correctly 6799 * when RCV.NXT == Last.ACK.Sent. 6800 */ 6801 if ((to->to_flags & TOF_TS) != 0 && 6802 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6803 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6804 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6805 tp->ts_recent_age = tcp_ts_getticks(); 6806 tp->ts_recent = to->to_tsval; 6807 } 6808 /* 6809 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6810 * is on (half-synchronized state), then queue data for later 6811 * processing; else drop segment and return. 
6812 */ 6813 if ((thflags & TH_ACK) == 0) { 6814 if (tp->t_flags & TF_NEEDSYN) { 6815 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6816 tiwin, thflags, nxt_pkt)); 6817 6818 } else if (tp->t_flags & TF_ACKNOW) { 6819 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6820 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 6821 return (ret_val); 6822 } else { 6823 ctf_do_drop(m, NULL); 6824 return (0); 6825 } 6826 } 6827 /* 6828 * Ack processing. 6829 */ 6830 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 6831 return (ret_val); 6832 } 6833 if (sbavail(&so->so_snd)) { 6834 if (rack_progress_timeout_check(tp)) { 6835 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6836 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6837 return (1); 6838 } 6839 } 6840 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6841 tiwin, thflags, nxt_pkt)); 6842 } 6843 6844 static int 6845 rack_check_data_after_close(struct mbuf *m, 6846 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 6847 { 6848 struct tcp_rack *rack; 6849 6850 rack = (struct tcp_rack *)tp->t_fb_ptr; 6851 if (rack->rc_allow_data_af_clo == 0) { 6852 close_now: 6853 tp = tcp_close(tp); 6854 TCPSTAT_INC(tcps_rcvafterclose); 6855 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 6856 return (1); 6857 } 6858 if (sbavail(&so->so_snd) == 0) 6859 goto close_now; 6860 /* Ok we allow data that is ignored and a followup reset */ 6861 tp->rcv_nxt = th->th_seq + *tlen; 6862 tp->t_flags2 |= TF2_DROP_AF_DATA; 6863 rack->r_wanted_output = 1; 6864 *tlen = 0; 6865 return (0); 6866 } 6867 6868 /* 6869 * Return value of 1, the TCB is unlocked and most 6870 * likely gone, return value of 0, the TCP is still 6871 * locked. 6872 */ 6873 static int 6874 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 6875 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6876 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 6877 { 6878 int32_t ret_val = 0; 6879 int32_t ourfinisacked = 0; 6880 6881 ctf_calc_rwin(so, tp); 6882 6883 if ((thflags & TH_RST) || 6884 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6885 return (ctf_process_rst(m, th, so, tp)); 6886 /* 6887 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6888 * synchronized state. 6889 */ 6890 if (thflags & TH_SYN) { 6891 ctf_challenge_ack(m, th, tp, &ret_val); 6892 return (ret_val); 6893 } 6894 /* 6895 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6896 * it's less than ts_recent, drop it. 6897 */ 6898 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6899 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6900 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6901 return (ret_val); 6902 } 6903 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6904 return (ret_val); 6905 } 6906 /* 6907 * If new data are received on a connection after the user processes 6908 * are gone, then RST the other end. 6909 */ 6910 if ((so->so_state & SS_NOFDREF) && tlen) { 6911 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 6912 return (1); 6913 } 6914 /* 6915 * If last ACK falls within this segment's sequence numbers, record 6916 * its timestamp. NOTE: 1) That the test incorporates suggestions 6917 * from the latest proposal of the tcplw@cray.com list (Braden 6918 * 1993/04/26). 
2) That updating only on newer timestamps interferes 6919 * with our earlier PAWS tests, so this check should be solely 6920 * predicated on the sequence space of this segment. 3) That we 6921 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6922 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6923 * SEG.Len, This modified check allows us to overcome RFC1323's 6924 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6925 * p.869. In such cases, we can still calculate the RTT correctly 6926 * when RCV.NXT == Last.ACK.Sent. 6927 */ 6928 if ((to->to_flags & TOF_TS) != 0 && 6929 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6930 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6931 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6932 tp->ts_recent_age = tcp_ts_getticks(); 6933 tp->ts_recent = to->to_tsval; 6934 } 6935 /* 6936 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6937 * is on (half-synchronized state), then queue data for later 6938 * processing; else drop segment and return. 6939 */ 6940 if ((thflags & TH_ACK) == 0) { 6941 if (tp->t_flags & TF_NEEDSYN) { 6942 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6943 tiwin, thflags, nxt_pkt)); 6944 } else if (tp->t_flags & TF_ACKNOW) { 6945 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6946 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 6947 return (ret_val); 6948 } else { 6949 ctf_do_drop(m, NULL); 6950 return (0); 6951 } 6952 } 6953 /* 6954 * Ack processing. 6955 */ 6956 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6957 return (ret_val); 6958 } 6959 if (ourfinisacked) { 6960 /* 6961 * If we can't receive any more data, then closing user can 6962 * proceed. Starting the timer is contrary to the 6963 * specification, but if we don't get a FIN we'll hang 6964 * forever. 6965 * 6966 * XXXjl: we should release the tp also, and use a 6967 * compressed state. 6968 */ 6969 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6970 soisdisconnected(so); 6971 tcp_timer_activate(tp, TT_2MSL, 6972 (tcp_fast_finwait2_recycle ? 6973 tcp_finwait2_timeout : 6974 TP_MAXIDLE(tp))); 6975 } 6976 tcp_state_change(tp, TCPS_FIN_WAIT_2); 6977 } 6978 if (sbavail(&so->so_snd)) { 6979 if (rack_progress_timeout_check(tp)) { 6980 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6981 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6982 return (1); 6983 } 6984 } 6985 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6986 tiwin, thflags, nxt_pkt)); 6987 } 6988 6989 /* 6990 * Return value of 1, the TCB is unlocked and most 6991 * likely gone, return value of 0, the TCP is still 6992 * locked. 6993 */ 6994 static int 6995 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 6996 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6997 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 6998 { 6999 int32_t ret_val = 0; 7000 int32_t ourfinisacked = 0; 7001 7002 ctf_calc_rwin(so, tp); 7003 7004 if ((thflags & TH_RST) || 7005 (tp->t_fin_is_rst && (thflags & TH_FIN))) 7006 return (ctf_process_rst(m, th, so, tp)); 7007 /* 7008 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 7009 * synchronized state. 7010 */ 7011 if (thflags & TH_SYN) { 7012 ctf_challenge_ack(m, th, tp, &ret_val); 7013 return (ret_val); 7014 } 7015 /* 7016 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 7017 * it's less than ts_recent, drop it. 
7018 */ 7019 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 7020 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 7021 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 7022 return (ret_val); 7023 } 7024 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 7025 return (ret_val); 7026 } 7027 /* 7028 * If new data are received on a connection after the user processes 7029 * are gone, then RST the other end. 7030 */ 7031 if ((so->so_state & SS_NOFDREF) && tlen) { 7032 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 7033 return (1); 7034 } 7035 /* 7036 * If last ACK falls within this segment's sequence numbers, record 7037 * its timestamp. NOTE: 1) That the test incorporates suggestions 7038 * from the latest proposal of the tcplw@cray.com list (Braden 7039 * 1993/04/26). 2) That updating only on newer timestamps interferes 7040 * with our earlier PAWS tests, so this check should be solely 7041 * predicated on the sequence space of this segment. 3) That we 7042 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 7043 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 7044 * SEG.Len, This modified check allows us to overcome RFC1323's 7045 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 7046 * p.869. In such cases, we can still calculate the RTT correctly 7047 * when RCV.NXT == Last.ACK.Sent. 7048 */ 7049 if ((to->to_flags & TOF_TS) != 0 && 7050 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 7051 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 7052 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 7053 tp->ts_recent_age = tcp_ts_getticks(); 7054 tp->ts_recent = to->to_tsval; 7055 } 7056 /* 7057 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 7058 * is on (half-synchronized state), then queue data for later 7059 * processing; else drop segment and return. 7060 */ 7061 if ((thflags & TH_ACK) == 0) { 7062 if (tp->t_flags & TF_NEEDSYN) { 7063 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7064 tiwin, thflags, nxt_pkt)); 7065 } else if (tp->t_flags & TF_ACKNOW) { 7066 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 7067 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 7068 return (ret_val); 7069 } else { 7070 ctf_do_drop(m, NULL); 7071 return (0); 7072 } 7073 } 7074 /* 7075 * Ack processing. 7076 */ 7077 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 7078 return (ret_val); 7079 } 7080 if (ourfinisacked) { 7081 tcp_twstart(tp); 7082 m_freem(m); 7083 return (1); 7084 } 7085 if (sbavail(&so->so_snd)) { 7086 if (rack_progress_timeout_check(tp)) { 7087 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 7088 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 7089 return (1); 7090 } 7091 } 7092 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7093 tiwin, thflags, nxt_pkt)); 7094 } 7095 7096 /* 7097 * Return value of 1, the TCB is unlocked and most 7098 * likely gone, return value of 0, the TCP is still 7099 * locked. 
7100 */ 7101 static int 7102 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 7103 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 7104 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 7105 { 7106 int32_t ret_val = 0; 7107 int32_t ourfinisacked = 0; 7108 7109 ctf_calc_rwin(so, tp); 7110 7111 if ((thflags & TH_RST) || 7112 (tp->t_fin_is_rst && (thflags & TH_FIN))) 7113 return (ctf_process_rst(m, th, so, tp)); 7114 /* 7115 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 7116 * synchronized state. 7117 */ 7118 if (thflags & TH_SYN) { 7119 ctf_challenge_ack(m, th, tp, &ret_val); 7120 return (ret_val); 7121 } 7122 /* 7123 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 7124 * it's less than ts_recent, drop it. 7125 */ 7126 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 7127 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 7128 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 7129 return (ret_val); 7130 } 7131 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 7132 return (ret_val); 7133 } 7134 /* 7135 * If new data are received on a connection after the user processes 7136 * are gone, then RST the other end. 7137 */ 7138 if ((so->so_state & SS_NOFDREF) && tlen) { 7139 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 7140 return (1); 7141 } 7142 /* 7143 * If last ACK falls within this segment's sequence numbers, record 7144 * its timestamp. NOTE: 1) That the test incorporates suggestions 7145 * from the latest proposal of the tcplw@cray.com list (Braden 7146 * 1993/04/26). 2) That updating only on newer timestamps interferes 7147 * with our earlier PAWS tests, so this check should be solely 7148 * predicated on the sequence space of this segment. 3) That we 7149 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 7150 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 7151 * SEG.Len, This modified check allows us to overcome RFC1323's 7152 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 7153 * p.869. In such cases, we can still calculate the RTT correctly 7154 * when RCV.NXT == Last.ACK.Sent. 7155 */ 7156 if ((to->to_flags & TOF_TS) != 0 && 7157 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 7158 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 7159 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 7160 tp->ts_recent_age = tcp_ts_getticks(); 7161 tp->ts_recent = to->to_tsval; 7162 } 7163 /* 7164 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 7165 * is on (half-synchronized state), then queue data for later 7166 * processing; else drop segment and return. 7167 */ 7168 if ((thflags & TH_ACK) == 0) { 7169 if (tp->t_flags & TF_NEEDSYN) { 7170 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7171 tiwin, thflags, nxt_pkt)); 7172 } else if (tp->t_flags & TF_ACKNOW) { 7173 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 7174 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 7175 return (ret_val); 7176 } else { 7177 ctf_do_drop(m, NULL); 7178 return (0); 7179 } 7180 } 7181 /* 7182 * case TCPS_LAST_ACK: Ack processing. 
7183 */ 7184 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 7185 return (ret_val); 7186 } 7187 if (ourfinisacked) { 7188 tp = tcp_close(tp); 7189 ctf_do_drop(m, tp); 7190 return (1); 7191 } 7192 if (sbavail(&so->so_snd)) { 7193 if (rack_progress_timeout_check(tp)) { 7194 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 7195 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 7196 return (1); 7197 } 7198 } 7199 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7200 tiwin, thflags, nxt_pkt)); 7201 } 7202 7203 7204 /* 7205 * Return value of 1, the TCB is unlocked and most 7206 * likely gone, return value of 0, the TCP is still 7207 * locked. 7208 */ 7209 static int 7210 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 7211 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 7212 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 7213 { 7214 int32_t ret_val = 0; 7215 int32_t ourfinisacked = 0; 7216 7217 ctf_calc_rwin(so, tp); 7218 7219 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 7220 if ((thflags & TH_RST) || 7221 (tp->t_fin_is_rst && (thflags & TH_FIN))) 7222 return (ctf_process_rst(m, th, so, tp)); 7223 /* 7224 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 7225 * synchronized state. 7226 */ 7227 if (thflags & TH_SYN) { 7228 ctf_challenge_ack(m, th, tp, &ret_val); 7229 return (ret_val); 7230 } 7231 /* 7232 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 7233 * it's less than ts_recent, drop it. 7234 */ 7235 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 7236 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 7237 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 7238 return (ret_val); 7239 } 7240 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 7241 return (ret_val); 7242 } 7243 /* 7244 * If new data are received on a connection after the user processes 7245 * are gone, then RST the other end. 7246 */ 7247 if ((so->so_state & SS_NOFDREF) && 7248 tlen) { 7249 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 7250 return (1); 7251 } 7252 /* 7253 * If last ACK falls within this segment's sequence numbers, record 7254 * its timestamp. NOTE: 1) That the test incorporates suggestions 7255 * from the latest proposal of the tcplw@cray.com list (Braden 7256 * 1993/04/26). 2) That updating only on newer timestamps interferes 7257 * with our earlier PAWS tests, so this check should be solely 7258 * predicated on the sequence space of this segment. 3) That we 7259 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 7260 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 7261 * SEG.Len, This modified check allows us to overcome RFC1323's 7262 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 7263 * p.869. In such cases, we can still calculate the RTT correctly 7264 * when RCV.NXT == Last.ACK.Sent. 7265 */ 7266 if ((to->to_flags & TOF_TS) != 0 && 7267 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 7268 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 7269 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 7270 tp->ts_recent_age = tcp_ts_getticks(); 7271 tp->ts_recent = to->to_tsval; 7272 } 7273 /* 7274 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 7275 * is on (half-synchronized state), then queue data for later 7276 * processing; else drop segment and return. 
7277 */ 7278 if ((thflags & TH_ACK) == 0) { 7279 if (tp->t_flags & TF_NEEDSYN) { 7280 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7281 tiwin, thflags, nxt_pkt)); 7282 } else if (tp->t_flags & TF_ACKNOW) { 7283 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 7284 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 7285 return (ret_val); 7286 } else { 7287 ctf_do_drop(m, NULL); 7288 return (0); 7289 } 7290 } 7291 /* 7292 * Ack processing. 7293 */ 7294 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 7295 return (ret_val); 7296 } 7297 if (sbavail(&so->so_snd)) { 7298 if (rack_progress_timeout_check(tp)) { 7299 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 7300 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 7301 return (1); 7302 } 7303 } 7304 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7305 tiwin, thflags, nxt_pkt)); 7306 } 7307 7308 7309 static void inline 7310 rack_clear_rate_sample(struct tcp_rack *rack) 7311 { 7312 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 7313 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 7314 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 7315 } 7316 7317 static void 7318 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack) 7319 { 7320 uint32_t tls_seg = 0; 7321 7322 #ifdef KERN_TLS 7323 if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) { 7324 tls_seg = ctf_get_opt_tls_size(rack->rc_inp->inp_socket, rack->rc_tp->snd_wnd); 7325 rack->r_ctl.rc_pace_min_segs = tls_seg; 7326 } else 7327 #endif 7328 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 7329 rack->r_ctl.rc_pace_max_segs = ctf_fixed_maxseg(tp) * rack->rc_pace_max_segs; 7330 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) 7331 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 7332 #ifdef KERN_TLS 7333 if (tls_seg != 0) { 7334 if (rack_hw_tls_max_seg > 1) { 7335 rack->r_ctl.rc_pace_max_segs /= tls_seg; 7336 if (rack_hw_tls_max_seg < rack->r_ctl.rc_pace_max_segs) 7337 rack->r_ctl.rc_pace_max_segs = rack_hw_tls_max_seg; 7338 } else { 7339 rack->r_ctl.rc_pace_max_segs = 1; 7340 } 7341 if (rack->r_ctl.rc_pace_max_segs == 0) 7342 rack->r_ctl.rc_pace_max_segs = 1; 7343 rack->r_ctl.rc_pace_max_segs *= tls_seg; 7344 } 7345 #endif 7346 rack_log_type_hrdwtso(tp, rack, tls_seg, rack->rc_inp->inp_socket->so_snd.sb_flags, 0, 2); 7347 } 7348 7349 static int 7350 rack_init(struct tcpcb *tp) 7351 { 7352 struct tcp_rack *rack = NULL; 7353 struct rack_sendmap *insret; 7354 7355 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 7356 if (tp->t_fb_ptr == NULL) { 7357 /* 7358 * We need to allocate memory but can't. The INP and INP_INFO 7359 * locks are held and they are recursive (this happens during setup).
So a 7360 * scheme to drop the locks fails :( 7361 * 7362 */ 7363 return (ENOMEM); 7364 } 7365 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 7366 7367 rack = (struct tcp_rack *)tp->t_fb_ptr; 7368 RB_INIT(&rack->r_ctl.rc_mtree); 7369 TAILQ_INIT(&rack->r_ctl.rc_free); 7370 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7371 rack->rc_tp = tp; 7372 if (tp->t_inpcb) { 7373 rack->rc_inp = tp->t_inpcb; 7374 } 7375 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 7376 /* Probably not needed but lets be sure */ 7377 rack_clear_rate_sample(rack); 7378 rack->r_cpu = 0; 7379 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 7380 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 7381 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 7382 rack->rc_pace_reduce = rack_slot_reduction; 7383 if (use_rack_cheat) 7384 rack->use_rack_cheat = 1; 7385 if (V_tcp_delack_enabled) 7386 tp->t_delayed_ack = 1; 7387 else 7388 tp->t_delayed_ack = 0; 7389 rack->rc_pace_max_segs = rack_hptsi_segments; 7390 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 7391 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 7392 rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce; 7393 rack->r_enforce_min_pace = rack_min_pace_time; 7394 rack->r_ctl.rc_prop_rate = rack_proportional_rate; 7395 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 7396 rack->r_ctl.rc_early_recovery = rack_early_recovery; 7397 rack->rc_always_pace = rack_pace_every_seg; 7398 rack_set_pace_segments(tp, rack); 7399 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 7400 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 7401 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 7402 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 7403 rack->r_ctl.rc_min_to = rack_min_to; 7404 rack->rack_per_of_gp = rack_per_of_gp; 7405 microuptime(&rack->r_ctl.rc_last_ack); 7406 rack->r_ctl.rc_last_time_decay = rack->r_ctl.rc_last_ack; 7407 rack->r_ctl.rc_tlp_rxt_last_time = tcp_ts_getticks(); 7408 /* Do we force on detection? */ 7409 if (tcp_force_detection) 7410 rack->do_detection = 1; 7411 else 7412 rack->do_detection = 0; 7413 if (tp->snd_una != tp->snd_max) { 7414 /* Create a send map for the current outstanding data */ 7415 struct rack_sendmap *rsm; 7416 7417 rsm = rack_alloc(rack); 7418 if (rsm == NULL) { 7419 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 7420 tp->t_fb_ptr = NULL; 7421 return (ENOMEM); 7422 } 7423 rsm->r_flags = RACK_OVERMAX; 7424 rsm->r_tim_lastsent[0] = rack->r_ctl.rc_tlp_rxt_last_time; 7425 rsm->r_rtr_cnt = 1; 7426 rsm->r_rtr_bytes = 0; 7427 rsm->r_start = tp->snd_una; 7428 rsm->r_end = tp->snd_max; 7429 rsm->r_dupack = 0; 7430 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7431 #ifdef INVARIANTS 7432 if (insret != NULL) { 7433 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 7434 insret, rack, rsm); 7435 } 7436 #endif 7437 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7438 rsm->r_in_tmap = 1; 7439 } 7440 rack_stop_all_timers(tp); 7441 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0); 7442 return (0); 7443 } 7444 7445 static int 7446 rack_handoff_ok(struct tcpcb *tp) 7447 { 7448 if ((tp->t_state == TCPS_CLOSED) || 7449 (tp->t_state == TCPS_LISTEN)) { 7450 /* Sure no problem though it may not stick */ 7451 return (0); 7452 } 7453 if ((tp->t_state == TCPS_SYN_SENT) || 7454 (tp->t_state == TCPS_SYN_RECEIVED)) { 7455 /* 7456 * We really don't know you have to get to ESTAB or beyond 7457 * to tell. 
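 * Returning EAGAIN signals that the caller may retry the handoff later, once the handshake completes and the SACK negotiation result is visible in tp->t_flags.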
7458 */ 7459 return (EAGAIN); 7460 } 7461 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 7462 return (0); 7463 } 7464 /* 7465 * If we reach here we don't do SACK on this connection so we can 7466 * never do rack. 7467 */ 7468 return (EINVAL); 7469 } 7470 7471 static void 7472 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 7473 { 7474 if (tp->t_fb_ptr) { 7475 struct tcp_rack *rack; 7476 struct rack_sendmap *rsm, *nrsm, *rm; 7477 if (tp->t_inpcb) { 7478 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 7479 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 7480 } 7481 rack = (struct tcp_rack *)tp->t_fb_ptr; 7482 #ifdef TCP_BLACKBOX 7483 tcp_log_flowend(tp); 7484 #endif 7485 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 7486 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7487 #ifdef INVARIANTS 7488 if (rm != rsm) { 7489 panic("At fini, rack:%p rsm:%p rm:%p", 7490 rack, rsm, rm); 7491 } 7492 #endif 7493 uma_zfree(rack_zone, rsm); 7494 } 7495 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 7496 while (rsm) { 7497 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 7498 uma_zfree(rack_zone, rsm); 7499 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 7500 } 7501 rack->rc_free_cnt = 0; 7502 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 7503 tp->t_fb_ptr = NULL; 7504 } 7505 /* Make sure snd_nxt is correctly set */ 7506 tp->snd_nxt = tp->snd_max; 7507 } 7508 7509 7510 static void 7511 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 7512 { 7513 switch (tp->t_state) { 7514 case TCPS_SYN_SENT: 7515 rack->r_state = TCPS_SYN_SENT; 7516 rack->r_substate = rack_do_syn_sent; 7517 break; 7518 case TCPS_SYN_RECEIVED: 7519 rack->r_state = TCPS_SYN_RECEIVED; 7520 rack->r_substate = rack_do_syn_recv; 7521 break; 7522 case TCPS_ESTABLISHED: 7523 rack_set_pace_segments(tp, rack); 7524 rack->r_state = TCPS_ESTABLISHED; 7525 rack->r_substate = rack_do_established; 7526 break; 7527 case TCPS_CLOSE_WAIT: 7528 rack->r_state = TCPS_CLOSE_WAIT; 7529 rack->r_substate = rack_do_close_wait; 7530 break; 7531 case TCPS_FIN_WAIT_1: 7532 rack->r_state = TCPS_FIN_WAIT_1; 7533 rack->r_substate = rack_do_fin_wait_1; 7534 break; 7535 case TCPS_CLOSING: 7536 rack->r_state = TCPS_CLOSING; 7537 rack->r_substate = rack_do_closing; 7538 break; 7539 case TCPS_LAST_ACK: 7540 rack->r_state = TCPS_LAST_ACK; 7541 rack->r_substate = rack_do_lastack; 7542 break; 7543 case TCPS_FIN_WAIT_2: 7544 rack->r_state = TCPS_FIN_WAIT_2; 7545 rack->r_substate = rack_do_fin_wait_2; 7546 break; 7547 case TCPS_LISTEN: 7548 case TCPS_CLOSED: 7549 case TCPS_TIME_WAIT: 7550 default: 7551 break; 7552 }; 7553 } 7554 7555 7556 static void 7557 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 7558 { 7559 /* 7560 * We received an ack, and then did not 7561 * call send or were bounced out due to the 7562 * hpts was running. Now a timer is up as well, is 7563 * it the right timer? 7564 */ 7565 struct rack_sendmap *rsm; 7566 int tmr_up; 7567 7568 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7569 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 7570 return; 7571 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7572 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 7573 (tmr_up == PACE_TMR_RXT)) { 7574 /* Should be an RXT */ 7575 return; 7576 } 7577 if (rsm == NULL) { 7578 /* Nothing outstanding? 
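Then only a delayed-ack, keep-alive, or (e.g. after ENOBUFS) retransmit timer makes sense; those are the cases accepted below.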
*/ 7579 if (tp->t_flags & TF_DELACK) { 7580 if (tmr_up == PACE_TMR_DELACK) 7581 /* We are supposed to have delayed ack up and we do */ 7582 return; 7583 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 7584 /* 7585 * if we hit enobufs then we would expect the possiblity 7586 * of nothing outstanding and the RXT up (and the hptsi timer). 7587 */ 7588 return; 7589 } else if (((tcp_always_keepalive || 7590 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 7591 (tp->t_state <= TCPS_CLOSING)) && 7592 (tmr_up == PACE_TMR_KEEP) && 7593 (tp->snd_max == tp->snd_una)) { 7594 /* We should have keep alive up and we do */ 7595 return; 7596 } 7597 } 7598 if (SEQ_GT(tp->snd_max, tp->snd_una) && 7599 ((tmr_up == PACE_TMR_TLP) || 7600 (tmr_up == PACE_TMR_RACK) || 7601 (tmr_up == PACE_TMR_RXT))) { 7602 /* 7603 * Either a Rack, TLP or RXT is fine if we 7604 * have outstanding data. 7605 */ 7606 return; 7607 } else if (tmr_up == PACE_TMR_DELACK) { 7608 /* 7609 * If the delayed ack was going to go off 7610 * before the rtx/tlp/rack timer were going to 7611 * expire, then that would be the timer in control. 7612 * Note we don't check the time here trusting the 7613 * code is correct. 7614 */ 7615 return; 7616 } 7617 /* 7618 * Ok the timer originally started is not what we want now. 7619 * We will force the hpts to be stopped if any, and restart 7620 * with the slot set to what was in the saved slot. 7621 */ 7622 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 7623 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0); 7624 } 7625 7626 static int 7627 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 7628 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 7629 int32_t nxt_pkt, struct timeval *tv) 7630 { 7631 int32_t thflags, retval, did_out = 0; 7632 int32_t way_out = 0; 7633 uint32_t cts; 7634 uint32_t tiwin; 7635 struct tcpopt to; 7636 struct tcp_rack *rack; 7637 struct rack_sendmap *rsm; 7638 int32_t prev_state = 0; 7639 7640 if (m->m_flags & M_TSTMP_LRO) { 7641 tv->tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 7642 tv->tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 7643 } 7644 cts = tcp_tv_to_mssectick(tv); 7645 rack = (struct tcp_rack *)tp->t_fb_ptr; 7646 7647 kern_prefetch(rack, &prev_state); 7648 prev_state = 0; 7649 thflags = th->th_flags; 7650 7651 NET_EPOCH_ASSERT(); 7652 INP_WLOCK_ASSERT(tp->t_inpcb); 7653 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 7654 __func__)); 7655 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 7656 __func__)); 7657 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 7658 union tcp_log_stackspecific log; 7659 struct timeval tv; 7660 7661 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 7662 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 7663 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 7664 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 7665 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 7666 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 7667 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 7668 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 7669 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 7670 tlen, &log, true, &tv); 7671 } 7672 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 7673 way_out = 4; 7674 retval = 0; 7675 goto done_with_input; 7676 } 7677 /* 7678 * If a segment with the ACK-bit set arrives in the SYN-SENT state 7679 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 
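 * (In other words, an ACK arriving in SYN-SENT is acceptable only if it falls in the range (ISS, SND.MAX]; anything outside that is answered with a reset by ctf_do_dropwithreset() below.)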
7680 */ 7681 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 7682 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 7683 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 7684 return(1); 7685 } 7686 /* 7687 * Segment received on connection. Reset idle time and keep-alive 7688 * timer. XXX: This should be done after segment validation to 7689 * ignore broken/spoofed segs. 7690 */ 7691 if (tp->t_idle_reduce && 7692 (tp->snd_max == tp->snd_una) && 7693 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 7694 counter_u64_add(rack_input_idle_reduces, 1); 7695 rack_cc_after_idle(tp); 7696 } 7697 tp->t_rcvtime = ticks; 7698 7699 /* 7700 * Unscale the window into a 32-bit value. For the SYN_SENT state 7701 * the scale is zero. 7702 */ 7703 tiwin = th->th_win << tp->snd_scale; 7704 #ifdef NETFLIX_STATS 7705 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 7706 #endif 7707 if (tiwin > rack->r_ctl.rc_high_rwnd) 7708 rack->r_ctl.rc_high_rwnd = tiwin; 7709 /* 7710 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 7711 * this to occur after we've validated the segment. 7712 */ 7713 if (tp->t_flags2 & TF2_ECN_PERMIT) { 7714 if (thflags & TH_CWR) 7715 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 7716 switch (iptos & IPTOS_ECN_MASK) { 7717 case IPTOS_ECN_CE: 7718 tp->t_flags2 |= TF2_ECN_SND_ECE; 7719 TCPSTAT_INC(tcps_ecn_ce); 7720 break; 7721 case IPTOS_ECN_ECT0: 7722 TCPSTAT_INC(tcps_ecn_ect0); 7723 break; 7724 case IPTOS_ECN_ECT1: 7725 TCPSTAT_INC(tcps_ecn_ect1); 7726 break; 7727 } 7728 /* Congestion experienced. */ 7729 if (thflags & TH_ECE) { 7730 rack_cong_signal(tp, th, CC_ECN); 7731 } 7732 } 7733 /* 7734 * Parse options on any incoming segment. 7735 */ 7736 tcp_dooptions(&to, (u_char *)(th + 1), 7737 (th->th_off << 2) - sizeof(struct tcphdr), 7738 (thflags & TH_SYN) ? TO_SYN : 0); 7739 7740 /* 7741 * If echoed timestamp is later than the current time, fall back to 7742 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 7743 * were used when this connection was established. 7744 */ 7745 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 7746 to.to_tsecr -= tp->ts_offset; 7747 if (TSTMP_GT(to.to_tsecr, cts)) 7748 to.to_tsecr = 0; 7749 } 7750 /* 7751 * If its the first time in we need to take care of options and 7752 * verify we can do SACK for rack! 7753 */ 7754 if (rack->r_state == 0) { 7755 /* Should be init'd by rack_init() */ 7756 KASSERT(rack->rc_inp != NULL, 7757 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 7758 if (rack->rc_inp == NULL) { 7759 rack->rc_inp = tp->t_inpcb; 7760 } 7761 7762 /* 7763 * Process options only when we get SYN/ACK back. The SYN 7764 * case for incoming connections is handled in tcp_syncache. 7765 * According to RFC1323 the window field in a SYN (i.e., a 7766 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 7767 * this is traditional behavior, may need to be cleaned up. 7768 */ 7769 rack->r_cpu = inp_to_cpuid(tp->t_inpcb); 7770 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 7771 if ((to.to_flags & TOF_SCALE) && 7772 (tp->t_flags & TF_REQ_SCALE)) { 7773 tp->t_flags |= TF_RCVD_SCALE; 7774 tp->snd_scale = to.to_wscale; 7775 } 7776 /* 7777 * Initial send window. It will be updated with the 7778 * next incoming segment to the scaled value. 
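 * (The window carried on the SYN itself is therefore used unscaled; e.g. once a wscale of 7 has been negotiated, a later raw window field of 1000 is interpreted as 1000 << 7 = 128000 bytes, while this one is taken as-is.)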
7779 */ 7780 tp->snd_wnd = th->th_win; 7781 if (to.to_flags & TOF_TS) { 7782 tp->t_flags |= TF_RCVD_TSTMP; 7783 tp->ts_recent = to.to_tsval; 7784 tp->ts_recent_age = cts; 7785 } 7786 if (to.to_flags & TOF_MSS) 7787 tcp_mss(tp, to.to_mss); 7788 if ((tp->t_flags & TF_SACK_PERMIT) && 7789 (to.to_flags & TOF_SACKPERM) == 0) 7790 tp->t_flags &= ~TF_SACK_PERMIT; 7791 if (IS_FASTOPEN(tp->t_flags)) { 7792 if (to.to_flags & TOF_FASTOPEN) { 7793 uint16_t mss; 7794 7795 if (to.to_flags & TOF_MSS) 7796 mss = to.to_mss; 7797 else 7798 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 7799 mss = TCP6_MSS; 7800 else 7801 mss = TCP_MSS; 7802 tcp_fastopen_update_cache(tp, mss, 7803 to.to_tfo_len, to.to_tfo_cookie); 7804 } else 7805 tcp_fastopen_disable_path(tp); 7806 } 7807 } 7808 /* 7809 * At this point we are at the initial call. Here we decide 7810 * if we are doing RACK or not. We do this by seeing if 7811 * TF_SACK_PERMIT is set, if not rack is *not* possible and 7812 * we switch to the default code. 7813 */ 7814 if ((tp->t_flags & TF_SACK_PERMIT) == 0) { 7815 tcp_switch_back_to_default(tp); 7816 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 7817 tlen, iptos); 7818 return (1); 7819 } 7820 /* Set the flag */ 7821 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 7822 tcp_set_hpts(tp->t_inpcb); 7823 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 7824 } 7825 /* 7826 * This is the one exception case where we set the rack state 7827 * always. All other times (timers etc) we must have a rack-state 7828 * set (so we assure we have done the checks above for SACK). 7829 */ 7830 memcpy(&rack->r_ctl.rc_last_ack, tv, sizeof(struct timeval)); 7831 rack->r_ctl.rc_rcvtime = cts; 7832 if (rack->r_state != tp->t_state) 7833 rack_set_state(tp, rack); 7834 if (SEQ_GT(th->th_ack, tp->snd_una) && 7835 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 7836 kern_prefetch(rsm, &prev_state); 7837 prev_state = rack->r_state; 7838 rack->r_ctl.rc_tlp_send_cnt = 0; 7839 rack_clear_rate_sample(rack); 7840 retval = (*rack->r_substate) (m, th, so, 7841 tp, &to, drop_hdrlen, 7842 tlen, tiwin, thflags, nxt_pkt, iptos); 7843 #ifdef INVARIANTS 7844 if ((retval == 0) && 7845 (tp->t_inpcb == NULL)) { 7846 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 7847 retval, tp, prev_state); 7848 } 7849 #endif 7850 if (retval == 0) { 7851 /* 7852 * If retval is 1 the tcb is unlocked and most likely the tp 7853 * is gone. 7854 */ 7855 INP_WLOCK_ASSERT(tp->t_inpcb); 7856 if (rack->set_pacing_done_a_iw == 0) { 7857 /* How much has been acked? */ 7858 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 7859 /* We have enough to set in the pacing segment size */ 7860 rack->set_pacing_done_a_iw = 1; 7861 rack_set_pace_segments(tp, rack); 7862 } 7863 } 7864 tcp_rack_xmit_timer_commit(rack, tp); 7865 if ((nxt_pkt == 0) || (IN_RECOVERY(tp->t_flags))) { 7866 if (rack->r_wanted_output != 0) { 7867 did_out = 1; 7868 (void)tp->t_fb->tfb_tcp_output(tp); 7869 } 7870 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7871 } 7872 if ((nxt_pkt == 0) && 7873 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 7874 (SEQ_GT(tp->snd_max, tp->snd_una) || 7875 (tp->t_flags & TF_DELACK) || 7876 ((tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 7877 (tp->t_state <= TCPS_CLOSING)))) { 7878 /* We could not send (probably in the hpts but stopped the timer earlier)? 
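If nothing is outstanding, no delayed ACK is pending and a pacing output is already scheduled on the hpts, leave it alone; otherwise pull the connection off the hpts (if it is on it) and start a fresh timer below.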
*/ 7879 if ((tp->snd_max == tp->snd_una) && 7880 ((tp->t_flags & TF_DELACK) == 0) && 7881 (rack->rc_inp->inp_in_hpts) && 7882 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 7883 /* keep alive not needed if we are hptsi output yet */ 7884 ; 7885 } else { 7886 if (rack->rc_inp->inp_in_hpts) { 7887 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7888 counter_u64_add(rack_per_timer_hole, 1); 7889 } 7890 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0); 7891 } 7892 way_out = 1; 7893 } else if (nxt_pkt == 0) { 7894 /* Do we have the correct timer running? */ 7895 rack_timer_audit(tp, rack, &so->so_snd); 7896 way_out = 2; 7897 } 7898 done_with_input: 7899 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out); 7900 if (did_out) 7901 rack->r_wanted_output = 0; 7902 #ifdef INVARIANTS 7903 if (tp->t_inpcb == NULL) { 7904 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 7905 did_out, 7906 retval, tp, prev_state); 7907 } 7908 #endif 7909 } 7910 return (retval); 7911 } 7912 7913 void 7914 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 7915 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 7916 { 7917 struct timeval tv; 7918 7919 /* First lets see if we have old packets */ 7920 if (tp->t_in_pkt) { 7921 if (ctf_do_queued_segments(so, tp, 1)) { 7922 m_freem(m); 7923 return; 7924 } 7925 } 7926 if (m->m_flags & M_TSTMP_LRO) { 7927 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 7928 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 7929 } else { 7930 /* Should not be should we kassert instead? */ 7931 tcp_get_usecs(&tv); 7932 } 7933 if(rack_do_segment_nounlock(m, th, so, tp, 7934 drop_hdrlen, tlen, iptos, 0, &tv) == 0) 7935 INP_WUNLOCK(tp->t_inpcb); 7936 } 7937 7938 struct rack_sendmap * 7939 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 7940 { 7941 struct rack_sendmap *rsm = NULL; 7942 int32_t idx; 7943 uint32_t srtt = 0, thresh = 0, ts_low = 0; 7944 7945 /* Return the next guy to be re-transmitted */ 7946 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 7947 return (NULL); 7948 } 7949 if (tp->t_flags & TF_SENTFIN) { 7950 /* retran the end FIN? */ 7951 return (NULL); 7952 } 7953 /* ok lets look at this one */ 7954 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7955 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 7956 goto check_it; 7957 } 7958 rsm = rack_find_lowest_rsm(rack); 7959 if (rsm == NULL) { 7960 return (NULL); 7961 } 7962 check_it: 7963 if (rsm->r_flags & RACK_ACKED) { 7964 return (NULL); 7965 } 7966 if ((rsm->r_flags & RACK_SACK_PASSED) == 0) { 7967 /* Its not yet ready */ 7968 return (NULL); 7969 } 7970 srtt = rack_grab_rtt(tp, rack); 7971 idx = rsm->r_rtr_cnt - 1; 7972 ts_low = rsm->r_tim_lastsent[idx]; 7973 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 7974 if ((tsused == ts_low) || 7975 (TSTMP_LT(tsused, ts_low))) { 7976 /* No time since sending */ 7977 return (NULL); 7978 } 7979 if ((tsused - ts_low) < thresh) { 7980 /* It has not been long enough yet */ 7981 return (NULL); 7982 } 7983 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 7984 ((rsm->r_flags & RACK_SACK_PASSED) && 7985 (rack->sack_attack_disable == 0))) { 7986 /* 7987 * We have passed the dup-ack threshold <or> 7988 * a SACK has indicated this is missing. 7989 * Note that if you are a declared attacker 7990 * it is only the dup-ack threshold that 7991 * will cause retransmits. 
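 * To summarize the eligibility checks: the entry must already have been passed over by a SACK (RACK_SACK_PASSED), at least the rack_calc_thresh_rack() threshold must have elapsed since its last (re)transmission, and then either of the two conditions above lets it be handed back for retransmission.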
7992 */ 7993 /* log retransmit reason */ 7994 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 7995 return (rsm); 7996 } 7997 return (NULL); 7998 } 7999 8000 static int32_t 8001 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len) 8002 { 8003 int32_t slot = 0; 8004 8005 if ((rack->rack_per_of_gp == 0) || 8006 (rack->rc_always_pace == 0)) { 8007 /* 8008 * We use the most optimistic possible cwnd/srtt for 8009 * sending calculations. This will make our 8010 * calculation anticipate getting more through 8011 * quicker than possible. But that's ok, we don't want 8012 * the peer to have a gap in data sending. 8013 */ 8014 uint32_t srtt, cwnd, tr_perms = 0; 8015 8016 old_method: 8017 if (rack->r_ctl.rc_rack_min_rtt) 8018 srtt = rack->r_ctl.rc_rack_min_rtt; 8019 else 8020 srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT)); 8021 if (rack->r_ctl.rc_rack_largest_cwnd) 8022 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 8023 else 8024 cwnd = tp->snd_cwnd; 8025 tr_perms = cwnd / srtt; 8026 if (tr_perms == 0) { 8027 tr_perms = ctf_fixed_maxseg(tp); 8028 } 8029 /* 8030 * Calculate how long this will take to drain, if 8031 * the calculation comes out to zero, that's ok, we 8032 * will use send_a_lot to possibly spin around for 8033 * more increasing tot_len_this_send to the point 8034 * that it's going to require a pace, or we hit the 8035 * cwnd. Which in that case we are just waiting for 8036 * an ACK. 8037 */ 8038 slot = len / tr_perms; 8039 /* Now do we reduce the time so we don't run dry? */ 8040 if (slot && rack->rc_pace_reduce) { 8041 int32_t reduce; 8042 8043 reduce = (slot / rack->rc_pace_reduce); 8044 if (reduce < slot) { 8045 slot -= reduce; 8046 } else 8047 slot = 0; 8048 } 8049 } else { 8050 int cnt; 8051 uint64_t bw_est, bw_raise, res, lentim; 8052 8053 bw_est = 0; 8054 for (cnt=0; cnt<RACK_GP_HIST; cnt++) { 8055 if ((rack->r_ctl.rc_gp_hist_filled == 0) && 8056 (rack->r_ctl.rc_gp_history[cnt] == 0)) 8057 break; 8058 bw_est += rack->r_ctl.rc_gp_history[cnt]; 8059 } 8060 if (bw_est == 0) { 8061 /* 8062 * No way yet to make a b/w estimate 8063 * (no goodput est yet). 8064 */ 8065 goto old_method; 8066 } 8067 /* Convert to bytes per second */ 8068 bw_est *= MSEC_IN_SECOND; 8069 /* 8070 * Now ratchet it up by our percentage. Note 8071 * that the minimum you can do is 1 which would 8072 * get you 101% of the average last N goodput estimates. 8073 * The max you can do is 256 which would yield you 8074 * 356% of the last N goodput estimates.
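 * As a rough worked example of the delay computed below: pacing a 15000 byte send against a 1.5 Mbyte/s estimate gives lentim = 15000 * 1000 and slot = 15000000 / 1500000 = 10 msec (before the minimum-pace check at the end of the function).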
8075 */ 8076 bw_raise = bw_est * (uint64_t)rack->rack_per_of_gp; 8077 bw_est += bw_raise; 8078 /* average by the number we added */ 8079 bw_est /= cnt; 8080 /* Now calculate a rate based on this b/w */ 8081 lentim = (uint64_t) len * (uint64_t)MSEC_IN_SECOND; 8082 res = lentim / bw_est; 8083 slot = (uint32_t)res; 8084 } 8085 if (rack->r_enforce_min_pace && 8086 (slot == 0)) { 8087 /* We are enforcing a minimum pace time of 1ms */ 8088 slot = rack->r_enforce_min_pace; 8089 } 8090 if (slot) 8091 counter_u64_add(rack_calc_nonzero, 1); 8092 else 8093 counter_u64_add(rack_calc_zero, 1); 8094 return (slot); 8095 } 8096 8097 static int 8098 rack_output(struct tcpcb *tp) 8099 { 8100 struct socket *so; 8101 uint32_t recwin, sendwin; 8102 uint32_t sb_offset; 8103 int32_t len, flags, error = 0; 8104 struct mbuf *m; 8105 struct mbuf *mb; 8106 uint32_t if_hw_tsomaxsegcount = 0; 8107 uint32_t if_hw_tsomaxsegsize = 0; 8108 int32_t maxseg; 8109 long tot_len_this_send = 0; 8110 struct ip *ip = NULL; 8111 #ifdef TCPDEBUG 8112 struct ipovly *ipov = NULL; 8113 #endif 8114 struct udphdr *udp = NULL; 8115 struct tcp_rack *rack; 8116 struct tcphdr *th; 8117 uint8_t pass = 0; 8118 uint8_t wanted_cookie = 0; 8119 u_char opt[TCP_MAXOLEN]; 8120 unsigned ipoptlen, optlen, hdrlen, ulen=0; 8121 uint32_t rack_seq; 8122 8123 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 8124 unsigned ipsec_optlen = 0; 8125 8126 #endif 8127 int32_t idle, sendalot; 8128 int32_t sub_from_prr = 0; 8129 volatile int32_t sack_rxmit; 8130 struct rack_sendmap *rsm = NULL; 8131 int32_t tso, mtu; 8132 struct tcpopt to; 8133 int32_t slot = 0; 8134 int32_t sup_rack = 0; 8135 uint32_t cts; 8136 uint8_t hpts_calling, new_data_tlp = 0, doing_tlp = 0; 8137 int32_t do_a_prefetch; 8138 int32_t prefetch_rsm = 0; 8139 int force_tso = 0; 8140 int32_t orig_len; 8141 int32_t prefetch_so_done = 0; 8142 struct tcp_log_buffer *lgb = NULL; 8143 struct inpcb *inp; 8144 struct sockbuf *sb; 8145 #ifdef INET6 8146 struct ip6_hdr *ip6 = NULL; 8147 int32_t isipv6; 8148 #endif 8149 uint8_t filled_all = 0; 8150 bool hw_tls = false; 8151 8152 /* setup and take the cache hits here */ 8153 rack = (struct tcp_rack *)tp->t_fb_ptr; 8154 inp = rack->rc_inp; 8155 so = inp->inp_socket; 8156 sb = &so->so_snd; 8157 kern_prefetch(sb, &do_a_prefetch); 8158 do_a_prefetch = 1; 8159 8160 #ifdef KERN_TLS 8161 hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0; 8162 #endif 8163 8164 INP_WLOCK_ASSERT(inp); 8165 #ifdef TCP_OFFLOAD 8166 if (tp->t_flags & TF_TOE) 8167 return (tcp_offload_output(tp)); 8168 #endif 8169 maxseg = ctf_fixed_maxseg(tp); 8170 /* 8171 * For TFO connections in SYN_RECEIVED, only allow the initial 8172 * SYN|ACK and those sent by the retransmit timer. 8173 */ 8174 if (IS_FASTOPEN(tp->t_flags) && 8175 (tp->t_state == TCPS_SYN_RECEIVED) && 8176 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 8177 (rack->r_ctl.rc_resend == NULL)) /* not a retransmit */ 8178 return (0); 8179 #ifdef INET6 8180 if (rack->r_state) { 8181 /* Use the cache line loaded if possible */ 8182 isipv6 = rack->r_is_v6; 8183 } else { 8184 isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 8185 } 8186 #endif 8187 cts = tcp_ts_getticks(); 8188 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 8189 inp->inp_in_hpts) { 8190 /* 8191 * We are on the hpts for some timer but not hptsi output. 8192 * Remove from the hpts unconditionally. 8193 */ 8194 rack_timer_cancel(tp, rack, cts, __LINE__); 8195 } 8196 /* Mark that we have called rack_output(). 
*/ 8197 if ((rack->r_timer_override) || 8198 (tp->t_flags & TF_FORCEDATA) || 8199 (tp->t_state < TCPS_ESTABLISHED)) { 8200 if (tp->t_inpcb->inp_in_hpts) 8201 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 8202 } else if (tp->t_inpcb->inp_in_hpts) { 8203 /* 8204 * On the hpts you can't pass even if ACKNOW is on, we will 8205 * when the hpts fires. 8206 */ 8207 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 8208 return (0); 8209 } 8210 hpts_calling = inp->inp_hpts_calls; 8211 inp->inp_hpts_calls = 0; 8212 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8213 if (rack_process_timers(tp, rack, cts, hpts_calling)) { 8214 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 8215 return (0); 8216 } 8217 } 8218 rack->r_wanted_output = 0; 8219 rack->r_timer_override = 0; 8220 /* 8221 * For TFO connections in SYN_SENT or SYN_RECEIVED, 8222 * only allow the initial SYN or SYN|ACK and those sent 8223 * by the retransmit timer. 8224 */ 8225 if (IS_FASTOPEN(tp->t_flags) && 8226 ((tp->t_state == TCPS_SYN_RECEIVED) || 8227 (tp->t_state == TCPS_SYN_SENT)) && 8228 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 8229 (tp->t_rxtshift == 0)) /* not a retransmit */ 8230 return (0); 8231 /* 8232 * Determine length of data that should be transmitted, and flags 8233 * that will be used. If there is some data or critical controls 8234 * (SYN, RST) to send, then transmit; otherwise, investigate 8235 * further. 8236 */ 8237 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 8238 if (tp->t_idle_reduce) { 8239 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 8240 rack_cc_after_idle(tp); 8241 } 8242 tp->t_flags &= ~TF_LASTIDLE; 8243 if (idle) { 8244 if (tp->t_flags & TF_MORETOCOME) { 8245 tp->t_flags |= TF_LASTIDLE; 8246 idle = 0; 8247 } 8248 } 8249 again: 8250 /* 8251 * If we've recently taken a timeout, snd_max will be greater than 8252 * snd_nxt. There may be SACK information that allows us to avoid 8253 * resending already delivered data. Adjust snd_nxt accordingly. 8254 */ 8255 sendalot = 0; 8256 cts = tcp_ts_getticks(); 8257 tso = 0; 8258 mtu = 0; 8259 sb_offset = tp->snd_max - tp->snd_una; 8260 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 8261 8262 flags = tcp_outflags[tp->t_state]; 8263 while (rack->rc_free_cnt < rack_free_cache) { 8264 rsm = rack_alloc(rack); 8265 if (rsm == NULL) { 8266 if (inp->inp_hpts_calls) 8267 /* Retry in a ms */ 8268 slot = 1; 8269 goto just_return_nolock; 8270 } 8271 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 8272 rack->rc_free_cnt++; 8273 rsm = NULL; 8274 } 8275 if (inp->inp_hpts_calls) 8276 inp->inp_hpts_calls = 0; 8277 sack_rxmit = 0; 8278 len = 0; 8279 rsm = NULL; 8280 if (flags & TH_RST) { 8281 SOCKBUF_LOCK(sb); 8282 goto send; 8283 } 8284 if (rack->r_ctl.rc_tlpsend) { 8285 /* Tail loss probe */ 8286 long cwin; 8287 long tlen; 8288 8289 doing_tlp = 1; 8290 /* 8291 * Check if we can do a TLP with a RACK'd packet 8292 * this can happen if we are not doing the rack 8293 * cheat and we skipped to a TLP and it 8294 * went off. 
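 * If tcp_rack_output() finds such a candidate we probe with that retransmission; otherwise we fall back to the segment staged in rc_tlpsend. Either way the probe is clipped to at most one fixed MSS (and the send window).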
8295 */ 8296 rsm = tcp_rack_output(tp, rack, cts); 8297 if (rsm == NULL) 8298 rsm = rack->r_ctl.rc_tlpsend; 8299 rack->r_ctl.rc_tlpsend = NULL; 8300 sack_rxmit = 1; 8301 tlen = rsm->r_end - rsm->r_start; 8302 if (tlen > ctf_fixed_maxseg(tp)) 8303 tlen = ctf_fixed_maxseg(tp); 8304 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 8305 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 8306 __func__, __LINE__, 8307 rsm->r_start, tp->snd_una, tp, rack, rsm)); 8308 sb_offset = rsm->r_start - tp->snd_una; 8309 cwin = min(tp->snd_wnd, tlen); 8310 len = cwin; 8311 } else if (rack->r_ctl.rc_resend) { 8312 /* Retransmit timer */ 8313 rsm = rack->r_ctl.rc_resend; 8314 rack->r_ctl.rc_resend = NULL; 8315 len = rsm->r_end - rsm->r_start; 8316 sack_rxmit = 1; 8317 sendalot = 0; 8318 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 8319 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 8320 __func__, __LINE__, 8321 rsm->r_start, tp->snd_una, tp, rack, rsm)); 8322 sb_offset = rsm->r_start - tp->snd_una; 8323 if (len >= ctf_fixed_maxseg(tp)) { 8324 len = ctf_fixed_maxseg(tp); 8325 } 8326 } else if ((rack->rc_in_persist == 0) && 8327 ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) { 8328 int maxseg; 8329 8330 maxseg = ctf_fixed_maxseg(tp); 8331 if ((!IN_RECOVERY(tp->t_flags)) && 8332 ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) { 8333 /* Enter recovery if not induced by a time-out */ 8334 rack->r_ctl.rc_rsm_start = rsm->r_start; 8335 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 8336 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 8337 rack_cong_signal(tp, NULL, CC_NDUPACK); 8338 /* 8339 * When we enter recovery we need to assure we send 8340 * one packet. 8341 */ 8342 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 8343 rack_log_to_prr(rack, 13); 8344 } 8345 #ifdef INVARIANTS 8346 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 8347 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 8348 tp, rack, rsm, rsm->r_start, tp->snd_una); 8349 } 8350 #endif 8351 len = rsm->r_end - rsm->r_start; 8352 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 8353 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 8354 __func__, __LINE__, 8355 rsm->r_start, tp->snd_una, tp, rack, rsm)); 8356 sb_offset = rsm->r_start - tp->snd_una; 8357 /* Can we send it within the PRR boundary? */ 8358 if ((rack->use_rack_cheat == 0) && (len > rack->r_ctl.rc_prr_sndcnt)) { 8359 /* It does not fit */ 8360 if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) > len) && 8361 (rack->r_ctl.rc_prr_sndcnt < maxseg)) { 8362 /* 8363 * prr is less than a segment, we 8364 * have more acks due in besides 8365 * what we need to resend. Lets not send 8366 * to avoid sending small pieces of 8367 * what we need to retransmit. 8368 */ 8369 len = 0; 8370 goto just_return_nolock; 8371 } 8372 len = rack->r_ctl.rc_prr_sndcnt; 8373 } 8374 sendalot = 0; 8375 if (len >= maxseg) { 8376 len = maxseg; 8377 } 8378 if (len > 0) { 8379 sub_from_prr = 1; 8380 sack_rxmit = 1; 8381 TCPSTAT_INC(tcps_sack_rexmits); 8382 TCPSTAT_ADD(tcps_sack_rexmit_bytes, 8383 min(len, ctf_fixed_maxseg(tp))); 8384 counter_u64_add(rack_rtm_prr_retran, 1); 8385 } 8386 } 8387 /* 8388 * Enforce a connection sendmap count limit if set 8389 * as long as we are not retransmiting. 
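 * (When the limit applies we simply decline to send any new data on this pass, bump the rack_to_alloc_limited counter, and record the first time this connection hits the limit via rack_alloc_limited_conns.)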
8390 */ 8391 if ((rsm == NULL) && 8392 (rack->do_detection == 0) && 8393 (rack_tcp_map_entries_limit > 0) && 8394 (rack->r_ctl.rc_num_maps_alloced >= rack_tcp_map_entries_limit)) { 8395 counter_u64_add(rack_to_alloc_limited, 1); 8396 if (!rack->alloc_limit_reported) { 8397 rack->alloc_limit_reported = 1; 8398 counter_u64_add(rack_alloc_limited_conns, 1); 8399 } 8400 goto just_return_nolock; 8401 } 8402 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 8403 /* we are retransmitting the fin */ 8404 len--; 8405 if (len) { 8406 /* 8407 * When retransmitting data do *not* include the 8408 * FIN. This could happen from a TLP probe. 8409 */ 8410 flags &= ~TH_FIN; 8411 } 8412 } 8413 #ifdef INVARIANTS 8414 /* For debugging */ 8415 rack->r_ctl.rc_rsm_at_retran = rsm; 8416 #endif 8417 /* 8418 * Get standard flags, and add SYN or FIN if requested by 'hidden' 8419 * state flags. 8420 */ 8421 if (tp->t_flags & TF_NEEDFIN) 8422 flags |= TH_FIN; 8423 if (tp->t_flags & TF_NEEDSYN) 8424 flags |= TH_SYN; 8425 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 8426 void *end_rsm; 8427 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 8428 if (end_rsm) 8429 kern_prefetch(end_rsm, &prefetch_rsm); 8430 prefetch_rsm = 1; 8431 } 8432 SOCKBUF_LOCK(sb); 8433 /* 8434 * If in persist timeout with window of 0, send 1 byte. Otherwise, 8435 * if window is small but nonzero and time TF_SENTFIN expired, we 8436 * will send what we can and go to transmit state. 8437 */ 8438 if (tp->t_flags & TF_FORCEDATA) { 8439 if (sendwin == 0) { 8440 /* 8441 * If we still have some data to send, then clear 8442 * the FIN bit. Usually this would happen below 8443 * when it realizes that we aren't sending all the 8444 * data. However, if we have exactly 1 byte of 8445 * unsent data, then it won't clear the FIN bit 8446 * below, and if we are in persist state, we wind up 8447 * sending the packet without recording that we sent 8448 * the FIN bit. 8449 * 8450 * We can't just blindly clear the FIN bit, because 8451 * if we don't have any more data to send then the 8452 * probe will be the FIN itself. 8453 */ 8454 if (sb_offset < sbused(sb)) 8455 flags &= ~TH_FIN; 8456 sendwin = 1; 8457 } else { 8458 if ((rack->rc_in_persist != 0) && 8459 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 8460 rack->r_ctl.rc_pace_min_segs))) 8461 rack_exit_persist(tp, rack); 8462 /* 8463 * If we are dropping persist mode then we need to 8464 * correct snd_nxt/snd_max and off. 8465 */ 8466 tp->snd_nxt = tp->snd_max; 8467 sb_offset = tp->snd_nxt - tp->snd_una; 8468 } 8469 } 8470 /* 8471 * If snd_nxt == snd_max and we have transmitted a FIN, the 8472 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 8473 * negative length. This can also occur when TCP opens up its 8474 * congestion window while receiving additional duplicate acks after 8475 * fast-retransmit because TCP will reset snd_nxt to snd_max after 8476 * the fast-retransmit. 8477 * 8478 * In the normal retransmit-FIN-only case, however, snd_nxt will be 8479 * set to snd_una, the sb_offset will be 0, and the length may wind 8480 * up 0. 8481 * 8482 * If sack_rxmit is true we are retransmitting from the scoreboard 8483 * in which case len is already set. 
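 * For the non-TLP new-data path below, len works out to roughly min(sendwin, sbavail(sb)) - sb_offset (never negative), and while in recovery the PRR accounting limits it further.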
8484 */ 8485 if (sack_rxmit == 0) { 8486 uint32_t avail; 8487 8488 avail = sbavail(sb); 8489 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 8490 sb_offset = tp->snd_nxt - tp->snd_una; 8491 else 8492 sb_offset = 0; 8493 if (IN_RECOVERY(tp->t_flags) == 0) { 8494 if (rack->r_ctl.rc_tlp_new_data) { 8495 /* TLP is forcing out new data */ 8496 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 8497 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 8498 } 8499 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd) 8500 len = tp->snd_wnd; 8501 else 8502 len = rack->r_ctl.rc_tlp_new_data; 8503 rack->r_ctl.rc_tlp_new_data = 0; 8504 new_data_tlp = doing_tlp = 1; 8505 } else { 8506 if (sendwin > avail) { 8507 /* use the available */ 8508 if (avail > sb_offset) { 8509 len = (int32_t)(avail - sb_offset); 8510 } else { 8511 len = 0; 8512 } 8513 } else { 8514 if (sendwin > sb_offset) { 8515 len = (int32_t)(sendwin - sb_offset); 8516 } else { 8517 len = 0; 8518 } 8519 } 8520 } 8521 } else { 8522 uint32_t outstanding; 8523 8524 /* 8525 * We are inside of a SACK recovery episode and are 8526 * sending new data, having retransmitted all the 8527 * data possible so far in the scoreboard. 8528 */ 8529 outstanding = tp->snd_max - tp->snd_una; 8530 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 8531 if (tp->snd_wnd > outstanding) { 8532 len = tp->snd_wnd - outstanding; 8533 /* Check to see if we have the data */ 8534 if (((sb_offset + len) > avail) && 8535 (avail > sb_offset)) 8536 len = avail - sb_offset; 8537 else 8538 len = 0; 8539 } else 8540 len = 0; 8541 } else if (avail > sb_offset) 8542 len = avail - sb_offset; 8543 else 8544 len = 0; 8545 if (len > 0) { 8546 if (len > rack->r_ctl.rc_prr_sndcnt) 8547 len = rack->r_ctl.rc_prr_sndcnt; 8548 if (len > 0) { 8549 sub_from_prr = 1; 8550 counter_u64_add(rack_rtm_prr_newdata, 1); 8551 } 8552 } 8553 if (len > ctf_fixed_maxseg(tp)) { 8554 /* 8555 * We should never send more than a MSS when 8556 * retransmitting or sending new data in prr 8557 * mode unless the override flag is on. Most 8558 * likely the PRR algorithm is not going to 8559 * let us send a lot as well :-) 8560 */ 8561 if (rack->r_ctl.rc_prr_sendalot == 0) 8562 len = ctf_fixed_maxseg(tp); 8563 } else if (len < ctf_fixed_maxseg(tp)) { 8564 /* 8565 * Do we send any? The idea here is if the 8566 * send empty's the socket buffer we want to 8567 * do it. However if not then lets just wait 8568 * for our prr_sndcnt to get bigger. 8569 */ 8570 long leftinsb; 8571 8572 leftinsb = sbavail(sb) - sb_offset; 8573 if (leftinsb > len) { 8574 /* This send does not empty the sb */ 8575 len = 0; 8576 } 8577 } 8578 } 8579 } 8580 if (prefetch_so_done == 0) { 8581 kern_prefetch(so, &prefetch_so_done); 8582 prefetch_so_done = 1; 8583 } 8584 /* 8585 * Lop off SYN bit if it has already been sent. However, if this is 8586 * SYN-SENT state and if segment contains data and if we don't know 8587 * that foreign host supports TAO, suppress sending segment. 8588 */ 8589 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 8590 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 8591 if (tp->t_state != TCPS_SYN_RECEIVED) 8592 flags &= ~TH_SYN; 8593 /* 8594 * When sending additional segments following a TFO SYN|ACK, 8595 * do not include the SYN bit. 8596 */ 8597 if (IS_FASTOPEN(tp->t_flags) && 8598 (tp->t_state == TCPS_SYN_RECEIVED)) 8599 flags &= ~TH_SYN; 8600 sb_offset--, len++; 8601 } 8602 /* 8603 * Be careful not to send data and/or FIN on SYN segments. 
This 8604 * measure is needed to prevent interoperability problems with not 8605 * fully conformant TCP implementations. 8606 */ 8607 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 8608 len = 0; 8609 flags &= ~TH_FIN; 8610 } 8611 /* 8612 * On TFO sockets, ensure no data is sent in the following cases: 8613 * 8614 * - When retransmitting SYN|ACK on a passively-created socket 8615 * 8616 * - When retransmitting SYN on an actively created socket 8617 * 8618 * - When sending a zero-length cookie (cookie request) on an 8619 * actively created socket 8620 * 8621 * - When the socket is in the CLOSED state (RST is being sent) 8622 */ 8623 if (IS_FASTOPEN(tp->t_flags) && 8624 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 8625 ((tp->t_state == TCPS_SYN_SENT) && 8626 (tp->t_tfo_client_cookie_len == 0)) || 8627 (flags & TH_RST))) { 8628 sack_rxmit = 0; 8629 len = 0; 8630 } 8631 /* Without fast-open there should never be data sent on a SYN */ 8632 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) 8633 len = 0; 8634 orig_len = len; 8635 if (len <= 0) { 8636 /* 8637 * If FIN has been sent but not acked, but we haven't been 8638 * called to retransmit, len will be < 0. Otherwise, window 8639 * shrank after we sent into it. If window shrank to 0, 8640 * cancel pending retransmit, pull snd_nxt back to (closed) 8641 * window, and set the persist timer if it isn't already 8642 * going. If the window didn't close completely, just wait 8643 * for an ACK. 8644 * 8645 * We also do a general check here to ensure that we will 8646 * set the persist timer when we have data to send, but a 8647 * 0-byte window. This makes sure the persist timer is set 8648 * even if the packet hits one of the "goto send" lines 8649 * below. 8650 */ 8651 len = 0; 8652 if ((tp->snd_wnd == 0) && 8653 (TCPS_HAVEESTABLISHED(tp->t_state)) && 8654 (tp->snd_una == tp->snd_max) && 8655 (sb_offset < (int)sbavail(sb))) { 8656 tp->snd_nxt = tp->snd_una; 8657 rack_enter_persist(tp, rack, cts); 8658 } 8659 } else if ((rsm == NULL) && 8660 ((doing_tlp == 0) || (new_data_tlp == 1)) && 8661 (len < rack->r_ctl.rc_pace_max_segs)) { 8662 /* 8663 * We are not sending a full segment for 8664 * some reason. Should we not send anything (think 8665 * sws or persists)? 8666 */ 8667 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 8668 (TCPS_HAVEESTABLISHED(tp->t_state)) && 8669 (len < (int)(sbavail(sb) - sb_offset))) { 8670 /* 8671 * Here the rwnd is less than 8672 * the pacing size, this is not a retransmit, 8673 * we are established and 8674 * the send is not the last in the socket buffer 8675 * we send nothing, and may enter persists. 8676 */ 8677 len = 0; 8678 if (tp->snd_max == tp->snd_una) { 8679 /* 8680 * Nothing out we can 8681 * go into persists. 8682 */ 8683 rack_enter_persist(tp, rack, cts); 8684 tp->snd_nxt = tp->snd_una; 8685 } 8686 } else if ((tp->snd_cwnd >= max(rack->r_ctl.rc_pace_min_segs, (maxseg * 4))) && 8687 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * maxseg)) && 8688 (len < (int)(sbavail(sb) - sb_offset)) && 8689 (len < rack->r_ctl.rc_pace_min_segs)) { 8690 /* 8691 * Here we are not retransmitting, and 8692 * the cwnd is not so small that we could 8693 * not send at least a min size (rxt timer 8694 * not having gone off), We have 2 segments or 8695 * more already in flight, its not the tail end 8696 * of the socket buffer and the cwnd is blocking 8697 * us from sending out a minimum pacing segment size. 8698 * Lets not send anything. 
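 * (Like the other branches of this chain, the test requires that this is not the tail of the socket buffer, so a short send that would empty the sb is still allowed out.)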
8699 */ 8700 len = 0; 8701 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 8702 min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 8703 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * maxseg)) && 8704 (len < (int)(sbavail(sb) - sb_offset)) && 8705 (TCPS_HAVEESTABLISHED(tp->t_state))) { 8706 /* 8707 * Here we have a send window but we have 8708 * filled it up and we can't send another pacing segment. 8709 * We also have in flight more than 2 segments 8710 * and we are not completing the sb i.e. we allow 8711 * the last bytes of the sb to go out even if 8712 * its not a full pacing segment. 8713 */ 8714 len = 0; 8715 } 8716 } 8717 /* len will be >= 0 after this point. */ 8718 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 8719 tcp_sndbuf_autoscale(tp, so, sendwin); 8720 /* 8721 * Decide if we can use TCP Segmentation Offloading (if supported by 8722 * hardware). 8723 * 8724 * TSO may only be used if we are in a pure bulk sending state. The 8725 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 8726 * options prevent using TSO. With TSO the TCP header is the same 8727 * (except for the sequence number) for all generated packets. This 8728 * makes it impossible to transmit any options which vary per 8729 * generated segment or packet. 8730 * 8731 * IPv4 handling has a clear separation of ip options and ip header 8732 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 8733 * the right thing below to provide length of just ip options and thus 8734 * checking for ipoptlen is enough to decide if ip options are present. 8735 */ 8736 8737 #ifdef INET6 8738 if (isipv6) 8739 ipoptlen = ip6_optlen(tp->t_inpcb); 8740 else 8741 #endif 8742 if (tp->t_inpcb->inp_options) 8743 ipoptlen = tp->t_inpcb->inp_options->m_len - 8744 offsetof(struct ipoption, ipopt_list); 8745 else 8746 ipoptlen = 0; 8747 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 8748 /* 8749 * Pre-calculate here as we save another lookup into the darknesses 8750 * of IPsec that way and can actually decide if TSO is ok. 8751 */ 8752 #ifdef INET6 8753 if (isipv6 && IPSEC_ENABLED(ipv6)) 8754 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 8755 #ifdef INET 8756 else 8757 #endif 8758 #endif /* INET6 */ 8759 #ifdef INET 8760 if (IPSEC_ENABLED(ipv4)) 8761 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 8762 #endif /* INET */ 8763 #endif 8764 8765 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 8766 ipoptlen += ipsec_optlen; 8767 #endif 8768 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > ctf_fixed_maxseg(tp) && 8769 (tp->t_port == 0) && 8770 ((tp->t_flags & TF_SIGNATURE) == 0) && 8771 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 8772 ipoptlen == 0) 8773 tso = 1; 8774 { 8775 uint32_t outstanding; 8776 8777 outstanding = tp->snd_max - tp->snd_una; 8778 if (tp->t_flags & TF_SENTFIN) { 8779 /* 8780 * If we sent a fin, snd_max is 1 higher than 8781 * snd_una 8782 */ 8783 outstanding--; 8784 } 8785 if (sack_rxmit) { 8786 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 8787 flags &= ~TH_FIN; 8788 } else { 8789 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 8790 sbused(sb))) 8791 flags &= ~TH_FIN; 8792 } 8793 } 8794 recwin = sbspace(&so->so_rcv); 8795 8796 /* 8797 * Sender silly window avoidance. We transmit under the following 8798 * conditions when len is non-zero: 8799 * 8800 * - We have a full segment (or more with TSO) - This is the last 8801 * buffer in a write()/send() and we are either idle or running 8802 * NODELAY - we've timed out (e.g. 
persist timer) - we have more 8803 * then 1/2 the maximum send window's worth of data (receiver may be 8804 * limited the window size) - we need to retransmit 8805 */ 8806 if (len) { 8807 if (len >= ctf_fixed_maxseg(tp)) { 8808 pass = 1; 8809 goto send; 8810 } 8811 /* 8812 * NOTE! on localhost connections an 'ack' from the remote 8813 * end may occur synchronously with the output and cause us 8814 * to flush a buffer queued with moretocome. XXX 8815 * 8816 */ 8817 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 8818 (idle || (tp->t_flags & TF_NODELAY)) && 8819 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) && 8820 (tp->t_flags & TF_NOPUSH) == 0) { 8821 pass = 2; 8822 goto send; 8823 } 8824 if (tp->t_flags & TF_FORCEDATA) { /* typ. timeout case */ 8825 pass = 3; 8826 goto send; 8827 } 8828 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 8829 goto send; 8830 } 8831 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 8832 pass = 4; 8833 goto send; 8834 } 8835 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 8836 pass = 5; 8837 goto send; 8838 } 8839 if (sack_rxmit) { 8840 pass = 6; 8841 goto send; 8842 } 8843 } 8844 /* 8845 * Sending of standalone window updates. 8846 * 8847 * Window updates are important when we close our window due to a 8848 * full socket buffer and are opening it again after the application 8849 * reads data from it. Once the window has opened again and the 8850 * remote end starts to send again the ACK clock takes over and 8851 * provides the most current window information. 8852 * 8853 * We must avoid the silly window syndrome whereas every read from 8854 * the receive buffer, no matter how small, causes a window update 8855 * to be sent. We also should avoid sending a flurry of window 8856 * updates when the socket buffer had queued a lot of data and the 8857 * application is doing small reads. 8858 * 8859 * Prevent a flurry of pointless window updates by only sending an 8860 * update when we can increase the advertized window by more than 8861 * 1/4th of the socket buffer capacity. When the buffer is getting 8862 * full or is very small be more aggressive and send an update 8863 * whenever we can increase by two mss sized segments. In all other 8864 * situations the ACK's to new incoming data will carry further 8865 * window increases. 8866 * 8867 * Don't send an independent window update if a delayed ACK is 8868 * pending (it will get piggy-backed on it) or the remote side 8869 * already has done a half-close and won't send more data. Skip 8870 * this if the connection is in T/TCP half-open state. 8871 */ 8872 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 8873 !(tp->t_flags & TF_DELACK) && 8874 !TCPS_HAVERCVDFIN(tp->t_state)) { 8875 /* 8876 * "adv" is the amount we could increase the window, taking 8877 * into account that we are limited by TCP_MAXWIN << 8878 * tp->rcv_scale. 8879 */ 8880 int32_t adv; 8881 int oldwin; 8882 8883 adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale); 8884 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 8885 oldwin = (tp->rcv_adv - tp->rcv_nxt); 8886 adv -= oldwin; 8887 } else 8888 oldwin = 0; 8889 8890 /* 8891 * If the new window size ends up being the same as the old 8892 * size when it is scaled, then don't force a window update. 
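 * For example, with rcv_scale == 7, oldwin == 900 and adv == 100,
 * both 900 >> 7 and 1000 >> 7 equal 7, so the peer would see no
 * change on the wire and the update is suppressed.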
8893 */ 8894 if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale) 8895 goto dontupdate; 8896 8897 if (adv >= (int32_t)(2 * ctf_fixed_maxseg(tp)) && 8898 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 8899 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 8900 so->so_rcv.sb_hiwat <= 8 * ctf_fixed_maxseg(tp))) { 8901 pass = 7; 8902 goto send; 8903 } 8904 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) 8905 goto send; 8906 } 8907 dontupdate: 8908 8909 /* 8910 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 8911 * is also a catch-all for the retransmit timer timeout case. 8912 */ 8913 if (tp->t_flags & TF_ACKNOW) { 8914 pass = 8; 8915 goto send; 8916 } 8917 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 8918 pass = 9; 8919 goto send; 8920 } 8921 if (SEQ_GT(tp->snd_up, tp->snd_una)) { 8922 pass = 10; 8923 goto send; 8924 } 8925 /* 8926 * If our state indicates that FIN should be sent and we have not 8927 * yet done so, then we need to send. 8928 */ 8929 if ((flags & TH_FIN) && 8930 (tp->snd_nxt == tp->snd_una)) { 8931 pass = 11; 8932 goto send; 8933 } 8934 /* 8935 * No reason to send a segment, just return. 8936 */ 8937 just_return: 8938 SOCKBUF_UNLOCK(sb); 8939 just_return_nolock: 8940 if (tot_len_this_send == 0) 8941 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 8942 if (slot) { 8943 /* set the rack tcb into the slot N */ 8944 counter_u64_add(rack_paced_segments, 1); 8945 } else if (tot_len_this_send) { 8946 counter_u64_add(rack_unpaced_segments, 1); 8947 } 8948 /* Check if we need to go into persists or not */ 8949 if ((rack->rc_in_persist == 0) && 8950 (tp->snd_max == tp->snd_una) && 8951 TCPS_HAVEESTABLISHED(tp->t_state) && 8952 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 8953 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd) && 8954 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs))) { 8955 /* Yes lets make sure to move to persist before timer-start */ 8956 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 8957 } 8958 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 8959 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling); 8960 tp->t_flags &= ~TF_FORCEDATA; 8961 return (0); 8962 8963 send: 8964 if ((flags & TH_FIN) && 8965 sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8966 /* 8967 * We do not transmit a FIN 8968 * with data outstanding. We 8969 * need to make it so all data 8970 * is acked first. 8971 */ 8972 flags &= ~TH_FIN; 8973 } 8974 if (doing_tlp == 0) { 8975 /* 8976 * Data not a TLP, and its not the rxt firing. If it is the 8977 * rxt firing, we want to leave the tlp_in_progress flag on 8978 * so we don't send another TLP. It has to be a rack timer 8979 * or normal send (response to acked data) to clear the tlp 8980 * in progress flag. 8981 */ 8982 rack->rc_tlp_in_progress = 0; 8983 } 8984 SOCKBUF_LOCK_ASSERT(sb); 8985 if (len > 0) { 8986 if (len >= ctf_fixed_maxseg(tp)) 8987 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 8988 else 8989 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 8990 } 8991 /* 8992 * Before ESTABLISHED, force sending of initial options unless TCP 8993 * set not to do any options. NOTE: we assume that the IP/TCP header 8994 * plus TCP options always fit in a single mbuf, leaving room for a 8995 * maximum link header, i.e. 
max_linkhdr + sizeof (struct tcpiphdr) 8996 * + optlen <= MCLBYTES 8997 */ 8998 optlen = 0; 8999 #ifdef INET6 9000 if (isipv6) 9001 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 9002 else 9003 #endif 9004 hdrlen = sizeof(struct tcpiphdr); 9005 9006 /* 9007 * Compute options for segment. We only have to care about SYN and 9008 * established connection segments. Options for SYN-ACK segments 9009 * are handled in TCP syncache. 9010 */ 9011 to.to_flags = 0; 9012 if ((tp->t_flags & TF_NOOPT) == 0) { 9013 /* Maximum segment size. */ 9014 if (flags & TH_SYN) { 9015 tp->snd_nxt = tp->iss; 9016 to.to_mss = tcp_mssopt(&inp->inp_inc); 9017 #ifdef NETFLIX_TCPOUDP 9018 if (tp->t_port) 9019 to.to_mss -= V_tcp_udp_tunneling_overhead; 9020 #endif 9021 to.to_flags |= TOF_MSS; 9022 9023 /* 9024 * On SYN or SYN|ACK transmits on TFO connections, 9025 * only include the TFO option if it is not a 9026 * retransmit, as the presence of the TFO option may 9027 * have caused the original SYN or SYN|ACK to have 9028 * been dropped by a middlebox. 9029 */ 9030 if (IS_FASTOPEN(tp->t_flags) && 9031 (tp->t_rxtshift == 0)) { 9032 if (tp->t_state == TCPS_SYN_RECEIVED) { 9033 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 9034 to.to_tfo_cookie = 9035 (u_int8_t *)&tp->t_tfo_cookie.server; 9036 to.to_flags |= TOF_FASTOPEN; 9037 wanted_cookie = 1; 9038 } else if (tp->t_state == TCPS_SYN_SENT) { 9039 to.to_tfo_len = 9040 tp->t_tfo_client_cookie_len; 9041 to.to_tfo_cookie = 9042 tp->t_tfo_cookie.client; 9043 to.to_flags |= TOF_FASTOPEN; 9044 wanted_cookie = 1; 9045 /* 9046 * If we wind up having more data to 9047 * send with the SYN than can fit in 9048 * one segment, don't send any more 9049 * until the SYN|ACK comes back from 9050 * the other end. 9051 */ 9052 sendalot = 0; 9053 } 9054 } 9055 } 9056 /* Window scaling. */ 9057 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 9058 to.to_wscale = tp->request_r_scale; 9059 to.to_flags |= TOF_SCALE; 9060 } 9061 /* Timestamps. */ 9062 if ((tp->t_flags & TF_RCVD_TSTMP) || 9063 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 9064 to.to_tsval = cts + tp->ts_offset; 9065 to.to_tsecr = tp->ts_recent; 9066 to.to_flags |= TOF_TS; 9067 } 9068 /* Set receive buffer autosizing timestamp. */ 9069 if (tp->rfbuf_ts == 0 && 9070 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 9071 tp->rfbuf_ts = tcp_ts_getticks(); 9072 /* Selective ACK's. */ 9073 if (flags & TH_SYN) 9074 to.to_flags |= TOF_SACKPERM; 9075 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 9076 tp->rcv_numsacks > 0) { 9077 to.to_flags |= TOF_SACK; 9078 to.to_nsacks = tp->rcv_numsacks; 9079 to.to_sacks = (u_char *)tp->sackblks; 9080 } 9081 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 9082 /* TCP-MD5 (RFC2385). */ 9083 if (tp->t_flags & TF_SIGNATURE) 9084 to.to_flags |= TOF_SIGNATURE; 9085 #endif /* TCP_SIGNATURE */ 9086 9087 /* Processing the options. */ 9088 hdrlen += optlen = tcp_addoptions(&to, opt); 9089 /* 9090 * If we wanted a TFO option to be added, but it was unable 9091 * to fit, ensure no data is sent. 9092 */ 9093 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 9094 !(to.to_flags & TOF_FASTOPEN)) 9095 len = 0; 9096 } 9097 #ifdef NETFLIX_TCPOUDP 9098 if (tp->t_port) { 9099 if (V_tcp_udp_tunneling_port == 0) { 9100 /* The port was removed?? 
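 * i.e. the global udp tunneling port is no longer configured;
 * drop the segment and return EHOSTUNREACH.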
*/ 9101 SOCKBUF_UNLOCK(&so->so_snd); 9102 return (EHOSTUNREACH); 9103 } 9104 hdrlen += sizeof(struct udphdr); 9105 } 9106 #endif 9107 #ifdef INET6 9108 if (isipv6) 9109 ipoptlen = ip6_optlen(tp->t_inpcb); 9110 else 9111 #endif 9112 if (tp->t_inpcb->inp_options) 9113 ipoptlen = tp->t_inpcb->inp_options->m_len - 9114 offsetof(struct ipoption, ipopt_list); 9115 else 9116 ipoptlen = 0; 9117 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 9118 ipoptlen += ipsec_optlen; 9119 #endif 9120 9121 #ifdef KERN_TLS 9122 /* force TSO for so TLS offload can get mss */ 9123 if (sb->sb_flags & SB_TLS_IFNET) { 9124 force_tso = 1; 9125 } 9126 #endif 9127 /* 9128 * Adjust data length if insertion of options will bump the packet 9129 * length beyond the t_maxseg length. Clear the FIN bit because we 9130 * cut off the tail of the segment. 9131 */ 9132 if (len + optlen + ipoptlen > tp->t_maxseg) { 9133 if (tso) { 9134 uint32_t if_hw_tsomax; 9135 uint32_t moff; 9136 int32_t max_len; 9137 9138 /* extract TSO information */ 9139 if_hw_tsomax = tp->t_tsomax; 9140 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 9141 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 9142 KASSERT(ipoptlen == 0, 9143 ("%s: TSO can't do IP options", __func__)); 9144 9145 /* 9146 * Check if we should limit by maximum payload 9147 * length: 9148 */ 9149 if (if_hw_tsomax != 0) { 9150 /* compute maximum TSO length */ 9151 max_len = (if_hw_tsomax - hdrlen - 9152 max_linkhdr); 9153 if (max_len <= 0) { 9154 len = 0; 9155 } else if (len > max_len) { 9156 sendalot = 1; 9157 len = max_len; 9158 } 9159 } 9160 /* 9161 * Prevent the last segment from being fractional 9162 * unless the send sockbuf can be emptied: 9163 */ 9164 max_len = (tp->t_maxseg - optlen); 9165 if (((sb_offset + len) < sbavail(sb)) && 9166 (hw_tls == 0)) { 9167 moff = len % (u_int)max_len; 9168 if (moff != 0) { 9169 len -= moff; 9170 sendalot = 1; 9171 } 9172 } 9173 /* 9174 * In case there are too many small fragments don't 9175 * use TSO: 9176 */ 9177 if (len <= maxseg) { 9178 len = max_len; 9179 sendalot = 1; 9180 tso = 0; 9181 } 9182 /* 9183 * Send the FIN in a separate segment after the bulk 9184 * sending is done. We don't trust the TSO 9185 * implementations to clear the FIN flag on all but 9186 * the last segment. 9187 */ 9188 if (tp->t_flags & TF_NEEDFIN) 9189 sendalot = 1; 9190 9191 } else { 9192 if (optlen + ipoptlen >= tp->t_maxseg) { 9193 /* 9194 * Since we don't have enough space to put 9195 * the IP header chain and the TCP header in 9196 * one packet as required by RFC 7112, don't 9197 * send it. Also ensure that at least one 9198 * byte of the payload can be put into the 9199 * TCP segment. 9200 */ 9201 SOCKBUF_UNLOCK(&so->so_snd); 9202 error = EMSGSIZE; 9203 sack_rxmit = 0; 9204 goto out; 9205 } 9206 len = tp->t_maxseg - optlen - ipoptlen; 9207 sendalot = 1; 9208 } 9209 } else 9210 tso = 0; 9211 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 9212 ("%s: len > IP_MAXPACKET", __func__)); 9213 #ifdef DIAGNOSTIC 9214 #ifdef INET6 9215 if (max_linkhdr + hdrlen > MCLBYTES) 9216 #else 9217 if (max_linkhdr + hdrlen > MHLEN) 9218 #endif 9219 panic("tcphdr too big"); 9220 #endif 9221 9222 /* 9223 * This KASSERT is here to catch edge cases at a well defined place. 9224 * Before, those had triggered (random) panic conditions further 9225 * down. 9226 */ 9227 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 9228 if ((len == 0) && 9229 (flags & TH_FIN) && 9230 (sbused(sb))) { 9231 /* 9232 * We have outstanding data, don't send a fin by itself!. 
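 * The FIN will go out on a later call, once the data ahead of it
 * has been sent and acked.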
9233 */ 9234 goto just_return; 9235 } 9236 /* 9237 * Grab a header mbuf, attaching a copy of data to be transmitted, 9238 * and initialize the header from the template for sends on this 9239 * connection. 9240 */ 9241 if (len) { 9242 uint32_t max_val; 9243 uint32_t moff; 9244 9245 if (rack->rc_pace_max_segs) 9246 max_val = rack->rc_pace_max_segs * ctf_fixed_maxseg(tp); 9247 else 9248 max_val = len; 9249 if (rack->r_ctl.rc_pace_max_segs < max_val) 9250 max_val = rack->r_ctl.rc_pace_max_segs; 9251 /* 9252 * We allow a limit on sending with hptsi. 9253 */ 9254 if (len > max_val) { 9255 len = max_val; 9256 } 9257 #ifdef INET6 9258 if (MHLEN < hdrlen + max_linkhdr) 9259 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 9260 else 9261 #endif 9262 m = m_gethdr(M_NOWAIT, MT_DATA); 9263 9264 if (m == NULL) { 9265 SOCKBUF_UNLOCK(sb); 9266 error = ENOBUFS; 9267 sack_rxmit = 0; 9268 goto out; 9269 } 9270 m->m_data += max_linkhdr; 9271 m->m_len = hdrlen; 9272 9273 /* 9274 * Start the m_copy functions from the closest mbuf to the 9275 * sb_offset in the socket buffer chain. 9276 */ 9277 mb = sbsndptr_noadv(sb, sb_offset, &moff); 9278 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 9279 m_copydata(mb, moff, (int)len, 9280 mtod(m, caddr_t)+hdrlen); 9281 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 9282 sbsndptr_adv(sb, mb, len); 9283 m->m_len += len; 9284 } else { 9285 struct sockbuf *msb; 9286 9287 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 9288 msb = NULL; 9289 else 9290 msb = sb; 9291 m->m_next = tcp_m_copym( 9292 #ifdef NETFLIX_COPY_ARGS 9293 tp, 9294 #endif 9295 mb, moff, &len, 9296 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 9297 ((rsm == NULL) ? hw_tls : 0) 9298 #ifdef NETFLIX_COPY_ARGS 9299 , &filled_all 9300 #endif 9301 ); 9302 if (len <= (tp->t_maxseg - optlen)) { 9303 /* 9304 * Must have ran out of mbufs for the copy 9305 * shorten it to no longer need tso. Lets 9306 * not put on sendalot since we are low on 9307 * mbufs. 9308 */ 9309 tso = 0; 9310 } 9311 if (m->m_next == NULL) { 9312 SOCKBUF_UNLOCK(sb); 9313 (void)m_free(m); 9314 error = ENOBUFS; 9315 sack_rxmit = 0; 9316 goto out; 9317 } 9318 } 9319 if ((tp->t_flags & TF_FORCEDATA) && len == 1) { 9320 TCPSTAT_INC(tcps_sndprobe); 9321 #ifdef NETFLIX_STATS 9322 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 9323 stats_voi_update_abs_u32(tp->t_stats, 9324 VOI_TCP_RETXPB, len); 9325 else 9326 stats_voi_update_abs_u64(tp->t_stats, 9327 VOI_TCP_TXPB, len); 9328 #endif 9329 } else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 9330 if (rsm && (rsm->r_flags & RACK_TLP)) { 9331 /* 9332 * TLP should not count in retran count, but 9333 * in its own bin 9334 */ 9335 counter_u64_add(rack_tlp_retran, 1); 9336 counter_u64_add(rack_tlp_retran_bytes, len); 9337 } else { 9338 tp->t_sndrexmitpack++; 9339 TCPSTAT_INC(tcps_sndrexmitpack); 9340 TCPSTAT_ADD(tcps_sndrexmitbyte, len); 9341 } 9342 #ifdef NETFLIX_STATS 9343 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 9344 len); 9345 #endif 9346 } else { 9347 TCPSTAT_INC(tcps_sndpack); 9348 TCPSTAT_ADD(tcps_sndbyte, len); 9349 #ifdef NETFLIX_STATS 9350 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 9351 len); 9352 #endif 9353 } 9354 /* 9355 * If we're sending everything we've got, set PUSH. (This 9356 * will keep happy those implementations which only give 9357 * data to the user when a buffer fills or a PUSH comes in.) 9358 */ 9359 if (sb_offset + len == sbused(sb) && 9360 sbused(sb) && 9361 !(flags & TH_SYN)) 9362 flags |= TH_PUSH; 9363 9364 /* 9365 * Are we doing pacing, if so we must calculate the slot. 
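 * (The slot is the pacing delay returned by rack_get_pacing_delay()
 * and later handed to the hpts via rack_start_hpts_timer().)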
We 9366 * only do hptsi in ESTABLISHED and with no RESET being 9367 * sent where we have data to send. 9368 */ 9369 if (((tp->t_state == TCPS_ESTABLISHED) || 9370 (tp->t_state == TCPS_CLOSE_WAIT) || 9371 ((tp->t_state == TCPS_FIN_WAIT_1) && 9372 ((tp->t_flags & TF_SENTFIN) == 0) && 9373 ((flags & TH_FIN) == 0))) && 9374 ((flags & TH_RST) == 0)) { 9375 /* Get our pacing rate */ 9376 tot_len_this_send += len; 9377 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send); 9378 } 9379 SOCKBUF_UNLOCK(sb); 9380 } else { 9381 SOCKBUF_UNLOCK(sb); 9382 if (tp->t_flags & TF_ACKNOW) 9383 TCPSTAT_INC(tcps_sndacks); 9384 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 9385 TCPSTAT_INC(tcps_sndctrl); 9386 else if (SEQ_GT(tp->snd_up, tp->snd_una)) 9387 TCPSTAT_INC(tcps_sndurg); 9388 else 9389 TCPSTAT_INC(tcps_sndwinup); 9390 9391 m = m_gethdr(M_NOWAIT, MT_DATA); 9392 if (m == NULL) { 9393 error = ENOBUFS; 9394 sack_rxmit = 0; 9395 goto out; 9396 } 9397 #ifdef INET6 9398 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 9399 MHLEN >= hdrlen) { 9400 M_ALIGN(m, hdrlen); 9401 } else 9402 #endif 9403 m->m_data += max_linkhdr; 9404 m->m_len = hdrlen; 9405 } 9406 SOCKBUF_UNLOCK_ASSERT(sb); 9407 m->m_pkthdr.rcvif = (struct ifnet *)0; 9408 #ifdef MAC 9409 mac_inpcb_create_mbuf(inp, m); 9410 #endif 9411 #ifdef INET6 9412 if (isipv6) { 9413 ip6 = mtod(m, struct ip6_hdr *); 9414 #ifdef NETFLIX_TCPOUDP 9415 if (tp->t_port) { 9416 udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr)); 9417 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 9418 udp->uh_dport = tp->t_port; 9419 ulen = hdrlen + len - sizeof(struct ip6_hdr); 9420 udp->uh_ulen = htons(ulen); 9421 th = (struct tcphdr *)(udp + 1); 9422 } else 9423 #endif 9424 th = (struct tcphdr *)(ip6 + 1); 9425 tcpip_fillheaders(inp, 9426 #ifdef NETFLIX_TCPOUDP 9427 tp->t_port, 9428 #endif 9429 ip6, th); 9430 } else 9431 #endif /* INET6 */ 9432 { 9433 ip = mtod(m, struct ip *); 9434 #ifdef TCPDEBUG 9435 ipov = (struct ipovly *)ip; 9436 #endif 9437 #ifdef NETFLIX_TCPOUDP 9438 if (tp->t_port) { 9439 udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip)); 9440 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 9441 udp->uh_dport = tp->t_port; 9442 ulen = hdrlen + len - sizeof(struct ip); 9443 udp->uh_ulen = htons(ulen); 9444 th = (struct tcphdr *)(udp + 1); 9445 } else 9446 #endif 9447 th = (struct tcphdr *)(ip + 1); 9448 tcpip_fillheaders(inp, 9449 #ifdef NETFLIX_TCPOUDP 9450 tp->t_port, 9451 #endif 9452 ip, th); 9453 } 9454 /* 9455 * Fill in fields, remembering maximum advertised window for use in 9456 * delaying messages about window sizes. If resending a FIN, be sure 9457 * not to use a new sequence number. 9458 */ 9459 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 9460 tp->snd_nxt == tp->snd_max) 9461 tp->snd_nxt--; 9462 /* 9463 * If we are starting a connection, send ECN setup SYN packet. If we 9464 * are on a retransmit, we may resend those bits a number of times 9465 * as per RFC 3168. 9466 */ 9467 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 9468 if (tp->t_rxtshift >= 1) { 9469 if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 9470 flags |= TH_ECE | TH_CWR; 9471 } else 9472 flags |= TH_ECE | TH_CWR; 9473 } 9474 if (tp->t_state == TCPS_ESTABLISHED && 9475 (tp->t_flags2 & TF2_ECN_PERMIT)) { 9476 /* 9477 * If the peer has ECN, mark data packets with ECN capable 9478 * transmission (ECT). Ignore pure ack packets, 9479 * retransmissions and window probes. 
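 * In the check below, len > 0 excludes pure ACKs,
 * SEQ_GEQ(snd_nxt, snd_max) excludes retransmissions, and the
 * TF_FORCEDATA/len == 1 test excludes window probes.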
9480 */ 9481 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 9482 !((tp->t_flags & TF_FORCEDATA) && len == 1)) { 9483 #ifdef INET6 9484 if (isipv6) 9485 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 9486 else 9487 #endif 9488 ip->ip_tos |= IPTOS_ECN_ECT0; 9489 TCPSTAT_INC(tcps_ecn_ect0); 9490 } 9491 /* 9492 * Reply with proper ECN notifications. 9493 */ 9494 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 9495 flags |= TH_CWR; 9496 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 9497 } 9498 if (tp->t_flags2 & TF2_ECN_SND_ECE) 9499 flags |= TH_ECE; 9500 } 9501 /* 9502 * If we are doing retransmissions, then snd_nxt will not reflect 9503 * the first unsent octet. For ACK only packets, we do not want the 9504 * sequence number of the retransmitted packet, we want the sequence 9505 * number of the next unsent octet. So, if there is no data (and no 9506 * SYN or FIN), use snd_max instead of snd_nxt when filling in 9507 * ti_seq. But if we are in persist state, snd_max might reflect 9508 * one byte beyond the right edge of the window, so use snd_nxt in 9509 * that case, since we know we aren't doing a retransmission. 9510 * (retransmit and persist are mutually exclusive...) 9511 */ 9512 if (sack_rxmit == 0) { 9513 if (len || (flags & (TH_SYN | TH_FIN)) || 9514 rack->rc_in_persist) { 9515 th->th_seq = htonl(tp->snd_nxt); 9516 rack_seq = tp->snd_nxt; 9517 } else if (flags & TH_RST) { 9518 /* 9519 * For a Reset send the last cum ack in sequence 9520 * (this like any other choice may still generate a 9521 * challenge ack, if a ack-update packet is in 9522 * flight). 9523 */ 9524 th->th_seq = htonl(tp->snd_una); 9525 rack_seq = tp->snd_una; 9526 } else { 9527 th->th_seq = htonl(tp->snd_max); 9528 rack_seq = tp->snd_max; 9529 } 9530 } else { 9531 th->th_seq = htonl(rsm->r_start); 9532 rack_seq = rsm->r_start; 9533 } 9534 th->th_ack = htonl(tp->rcv_nxt); 9535 if (optlen) { 9536 bcopy(opt, th + 1, optlen); 9537 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 9538 } 9539 th->th_flags = flags; 9540 /* 9541 * Calculate receive window. Don't shrink window, but avoid silly 9542 * window syndrome. 9543 * If a RST segment is sent, advertise a window of zero. 9544 */ 9545 if (flags & TH_RST) { 9546 recwin = 0; 9547 } else { 9548 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 9549 recwin < (long)ctf_fixed_maxseg(tp)) 9550 recwin = 0; 9551 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 9552 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 9553 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 9554 if (recwin > (long)TCP_MAXWIN << tp->rcv_scale) 9555 recwin = (long)TCP_MAXWIN << tp->rcv_scale; 9556 } 9557 9558 /* 9559 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 9560 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 9561 * handled in syncache. 9562 */ 9563 if (flags & TH_SYN) 9564 th->th_win = htons((u_short) 9565 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 9566 else 9567 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 9568 /* 9569 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 9570 * window. This may cause the remote transmitter to stall. This 9571 * flag tells soreceive() to disable delayed acknowledgements when 9572 * draining the buffer. This can occur if the receiver is 9573 * attempting to read more data than can be buffered prior to 9574 * transmitting on the connection. 
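 * The flag is cleared again in the else branch as soon as a
 * non-zero window is advertised.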
9575 */ 9576 if (th->th_win == 0) { 9577 tp->t_sndzerowin++; 9578 tp->t_flags |= TF_RXWIN0SENT; 9579 } else 9580 tp->t_flags &= ~TF_RXWIN0SENT; 9581 if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { 9582 th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); 9583 th->th_flags |= TH_URG; 9584 } else 9585 /* 9586 * If no urgent pointer to send, then we pull the urgent 9587 * pointer to the left edge of the send window so that it 9588 * doesn't drift into the send window on sequence number 9589 * wraparound. 9590 */ 9591 tp->snd_up = tp->snd_una; /* drag it along */ 9592 9593 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 9594 if (to.to_flags & TOF_SIGNATURE) { 9595 /* 9596 * Calculate MD5 signature and put it into the place 9597 * determined before. 9598 * NOTE: since TCP options buffer doesn't point into 9599 * mbuf's data, calculate offset and use it. 9600 */ 9601 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 9602 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 9603 /* 9604 * Do not send segment if the calculation of MD5 9605 * digest has failed. 9606 */ 9607 goto out; 9608 } 9609 } 9610 #endif 9611 9612 /* 9613 * Put TCP length in extended header, and then checksum extended 9614 * header and data. 9615 */ 9616 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 9617 #ifdef INET6 9618 if (isipv6) { 9619 /* 9620 * ip6_plen is not need to be filled now, and will be filled 9621 * in ip6_output. 9622 */ 9623 if (tp->t_port) { 9624 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 9625 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 9626 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 9627 th->th_sum = htons(0); 9628 UDPSTAT_INC(udps_opackets); 9629 } else { 9630 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 9631 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 9632 th->th_sum = in6_cksum_pseudo(ip6, 9633 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 9634 0); 9635 } 9636 } 9637 #endif 9638 #if defined(INET6) && defined(INET) 9639 else 9640 #endif 9641 #ifdef INET 9642 { 9643 if (tp->t_port) { 9644 m->m_pkthdr.csum_flags = CSUM_UDP; 9645 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 9646 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 9647 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 9648 th->th_sum = htons(0); 9649 UDPSTAT_INC(udps_opackets); 9650 } else { 9651 m->m_pkthdr.csum_flags = CSUM_TCP; 9652 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 9653 th->th_sum = in_pseudo(ip->ip_src.s_addr, 9654 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 9655 IPPROTO_TCP + len + optlen)); 9656 } 9657 /* IP version must be set here for ipv4/ipv6 checking later */ 9658 KASSERT(ip->ip_v == IPVERSION, 9659 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 9660 } 9661 #endif 9662 /* 9663 * Enable TSO and specify the size of the segments. The TCP pseudo 9664 * header checksum is always provided. XXX: Fixme: This is currently 9665 * not the case for IPv6. 9666 */ 9667 if (tso || force_tso) { 9668 KASSERT(force_tso || len > tp->t_maxseg - optlen, 9669 ("%s: len <= tso_segsz", __func__)); 9670 m->m_pkthdr.csum_flags |= CSUM_TSO; 9671 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 9672 } 9673 KASSERT(len + hdrlen == m_length(m, NULL), 9674 ("%s: mbuf chain different than expected: %d + %u != %u", 9675 __func__, len, hdrlen, m_length(m, NULL))); 9676 9677 #ifdef TCP_HHOOK 9678 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 9679 hhook_run_tcp_est_out(tp, th, &to, len, tso); 9680 #endif 9681 #ifdef TCPDEBUG 9682 /* 9683 * Trace. 
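 * (TCPDEBUG only: if SO_DEBUG is set on the socket the outgoing
 * header is recorded via tcp_trace().)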
9684 */ 9685 if (so->so_options & SO_DEBUG) { 9686 u_short save = 0; 9687 9688 #ifdef INET6 9689 if (!isipv6) 9690 #endif 9691 { 9692 save = ipov->ih_len; 9693 ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + 9694 * (th->th_off << 2) */ ); 9695 } 9696 tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); 9697 #ifdef INET6 9698 if (!isipv6) 9699 #endif 9700 ipov->ih_len = save; 9701 } 9702 #endif /* TCPDEBUG */ 9703 9704 /* We're getting ready to send; log now. */ 9705 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 9706 union tcp_log_stackspecific log; 9707 struct timeval tv; 9708 9709 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 9710 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 9711 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 9712 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 9713 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 9714 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 9715 log.u_bbr.flex4 = orig_len; 9716 if (filled_all) 9717 log.u_bbr.flex5 = 0x80000000; 9718 else 9719 log.u_bbr.flex5 = 0; 9720 if (rsm || sack_rxmit) { 9721 log.u_bbr.flex8 = 1; 9722 } else { 9723 log.u_bbr.flex8 = 0; 9724 } 9725 log.u_bbr.pkts_out = tp->t_maxseg; 9726 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 9727 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9728 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 9729 len, &log, false, NULL, NULL, 0, &tv); 9730 } else 9731 lgb = NULL; 9732 9733 /* 9734 * Fill in IP length and desired time to live and send to IP level. 9735 * There should be a better way to handle ttl and tos; we could keep 9736 * them in the template, but need a way to checksum without them. 9737 */ 9738 /* 9739 * m->m_pkthdr.len should have been set before cksum calcuration, 9740 * because in6_cksum() need it. 9741 */ 9742 #ifdef INET6 9743 if (isipv6) { 9744 /* 9745 * we separately set hoplimit for every segment, since the 9746 * user might want to change the value via setsockopt. Also, 9747 * desired default hop limit might be changed via Neighbor 9748 * Discovery. 9749 */ 9750 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 9751 9752 /* 9753 * Set the packet size here for the benefit of DTrace 9754 * probes. ip6_output() will set it properly; it's supposed 9755 * to include the option header lengths as well. 9756 */ 9757 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 9758 9759 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 9760 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 9761 else 9762 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 9763 9764 if (tp->t_state == TCPS_SYN_SENT) 9765 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 9766 9767 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 9768 /* TODO: IPv6 IP6TOS_ECT bit on */ 9769 error = ip6_output(m, tp->t_inpcb->in6p_outputopts, 9770 &inp->inp_route6, 9771 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 9772 NULL, NULL, inp); 9773 9774 if (error == EMSGSIZE && inp->inp_route6.ro_rt != NULL) 9775 mtu = inp->inp_route6.ro_rt->rt_mtu; 9776 } 9777 #endif /* INET6 */ 9778 #if defined(INET) && defined(INET6) 9779 else 9780 #endif 9781 #ifdef INET 9782 { 9783 ip->ip_len = htons(m->m_pkthdr.len); 9784 #ifdef INET6 9785 if (inp->inp_vflag & INP_IPV6PROTO) 9786 ip->ip_ttl = in6_selecthlim(inp, NULL); 9787 #endif /* INET6 */ 9788 /* 9789 * If we do path MTU discovery, then we set DF on every 9790 * packet. This might not be the best thing to do according 9791 * to RFC3390 Section 2. 
However the tcp hostcache migitates 9792 * the problem so it affects only the first tcp connection 9793 * with a host. 9794 * 9795 * NB: Don't set DF on small MTU/MSS to have a safe 9796 * fallback. 9797 */ 9798 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 9799 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 9800 if (tp->t_port == 0 || len < V_tcp_minmss) { 9801 ip->ip_off |= htons(IP_DF); 9802 } 9803 } else { 9804 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 9805 } 9806 9807 if (tp->t_state == TCPS_SYN_SENT) 9808 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 9809 9810 TCP_PROBE5(send, NULL, tp, ip, tp, th); 9811 9812 error = ip_output(m, tp->t_inpcb->inp_options, &inp->inp_route, 9813 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, 9814 inp); 9815 if (error == EMSGSIZE && inp->inp_route.ro_rt != NULL) 9816 mtu = inp->inp_route.ro_rt->rt_mtu; 9817 } 9818 #endif /* INET */ 9819 9820 out: 9821 if (lgb) { 9822 lgb->tlb_errno = error; 9823 lgb = NULL; 9824 } 9825 /* 9826 * In transmit state, time the transmission and arrange for the 9827 * retransmit. In persist state, just set snd_max. 9828 */ 9829 if (error == 0) { 9830 if (TCPS_HAVEESTABLISHED(tp->t_state) && 9831 (tp->t_flags & TF_SACK_PERMIT) && 9832 tp->rcv_numsacks > 0) 9833 tcp_clean_dsack_blocks(tp); 9834 if (len == 0) 9835 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 9836 else if (len == 1) { 9837 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 9838 } else if (len > 1) { 9839 int idx; 9840 9841 idx = (len / ctf_fixed_maxseg(tp)) + 3; 9842 if (idx >= TCP_MSS_ACCT_ATIMER) 9843 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 9844 else 9845 counter_u64_add(rack_out_size[idx], 1); 9846 } 9847 if (hw_tls && len > 0) { 9848 if (filled_all) { 9849 counter_u64_add(rack_tls_filled, 1); 9850 rack_log_type_hrdwtso(tp, rack, len, 0, orig_len, 1); 9851 } else { 9852 if (rsm) { 9853 counter_u64_add(rack_tls_rxt, 1); 9854 rack_log_type_hrdwtso(tp, rack, len, 2, orig_len, 1); 9855 } else if (doing_tlp) { 9856 counter_u64_add(rack_tls_tlp, 1); 9857 rack_log_type_hrdwtso(tp, rack, len, 3, orig_len, 1); 9858 } else if ( (ctf_outstanding(tp) + rack->r_ctl.rc_pace_min_segs) > sbavail(sb)) { 9859 counter_u64_add(rack_tls_app, 1); 9860 rack_log_type_hrdwtso(tp, rack, len, 4, orig_len, 1); 9861 } else if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) + rack->r_ctl.rc_pace_min_segs) > tp->snd_cwnd) { 9862 counter_u64_add(rack_tls_cwnd, 1); 9863 rack_log_type_hrdwtso(tp, rack, len, 5, orig_len, 1); 9864 } else if ((ctf_outstanding(tp) + rack->r_ctl.rc_pace_min_segs) > tp->snd_wnd) { 9865 counter_u64_add(rack_tls_rwnd, 1); 9866 rack_log_type_hrdwtso(tp, rack, len, 6, orig_len, 1); 9867 } else { 9868 rack_log_type_hrdwtso(tp, rack, len, 7, orig_len, 1); 9869 counter_u64_add(rack_tls_other, 1); 9870 } 9871 } 9872 } 9873 } 9874 if (sub_from_prr && (error == 0)) { 9875 if (rack->r_ctl.rc_prr_sndcnt >= len) 9876 rack->r_ctl.rc_prr_sndcnt -= len; 9877 else 9878 rack->r_ctl.rc_prr_sndcnt = 0; 9879 } 9880 sub_from_prr = 0; 9881 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts, 9882 pass, rsm); 9883 if ((error == 0) && 9884 (len > 0) && 9885 (tp->snd_una == tp->snd_max)) 9886 rack->r_ctl.rc_tlp_rxt_last_time = cts; 9887 if ((tp->t_flags & TF_FORCEDATA) == 0 || 9888 (rack->rc_in_persist == 0)) { 9889 tcp_seq startseq = tp->snd_nxt; 9890 9891 /* 9892 * Advance snd_nxt over sequence space of this segment. 
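 * SYN and FIN each consume one unit of sequence space, so they
 * advance snd_nxt by one just like a byte of payload.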
9893 */ 9894 if (error) 9895 /* We don't log or do anything with errors */ 9896 goto nomore; 9897 9898 if (flags & (TH_SYN | TH_FIN)) { 9899 if (flags & TH_SYN) 9900 tp->snd_nxt++; 9901 if (flags & TH_FIN) { 9902 tp->snd_nxt++; 9903 tp->t_flags |= TF_SENTFIN; 9904 } 9905 } 9906 /* In the ENOBUFS case we do *not* update snd_max */ 9907 if (sack_rxmit) 9908 goto nomore; 9909 9910 tp->snd_nxt += len; 9911 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 9912 if (tp->snd_una == tp->snd_max) { 9913 /* 9914 * Update the time we just added data since 9915 * none was outstanding. 9916 */ 9917 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 9918 tp->t_acktime = ticks; 9919 } 9920 tp->snd_max = tp->snd_nxt; 9921 /* 9922 * Time this transmission if not a retransmission and 9923 * not currently timing anything. 9924 * This is only relevant in case of switching back to 9925 * the base stack. 9926 */ 9927 if (tp->t_rtttime == 0) { 9928 tp->t_rtttime = ticks; 9929 tp->t_rtseq = startseq; 9930 TCPSTAT_INC(tcps_segstimed); 9931 } 9932 #ifdef NETFLIX_STATS 9933 if (!(tp->t_flags & TF_GPUTINPROG) && len) { 9934 tp->t_flags |= TF_GPUTINPROG; 9935 tp->gput_seq = startseq; 9936 tp->gput_ack = startseq + 9937 ulmin(sbavail(sb) - sb_offset, sendwin); 9938 tp->gput_ts = tcp_ts_getticks(); 9939 } 9940 #endif 9941 } 9942 } else { 9943 /* 9944 * Persist case, update snd_max but since we are in persist 9945 * mode (no window) we do not update snd_nxt. 9946 */ 9947 int32_t xlen = len; 9948 9949 if (error) 9950 goto nomore; 9951 9952 if (flags & TH_SYN) 9953 ++xlen; 9954 if (flags & TH_FIN) { 9955 ++xlen; 9956 tp->t_flags |= TF_SENTFIN; 9957 } 9958 /* In the ENOBUFS case we do *not* update snd_max */ 9959 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) { 9960 if (tp->snd_una == tp->snd_max) { 9961 /* 9962 * Update the time we just added data since 9963 * none was outstanding. 9964 */ 9965 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 9966 tp->t_acktime = ticks; 9967 } 9968 tp->snd_max = tp->snd_nxt + len; 9969 } 9970 } 9971 nomore: 9972 if (error) { 9973 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 9974 /* 9975 * Failures do not advance the seq counter above. For the 9976 * case of ENOBUFS we will fall out and retry in 1ms with 9977 * the hpts. Everything else will just have to retransmit 9978 * with the timer. 9979 * 9980 * In any case, we do not want to loop around for another 9981 * send without a good reason. 9982 */ 9983 sendalot = 0; 9984 switch (error) { 9985 case EPERM: 9986 tp->t_flags &= ~TF_FORCEDATA; 9987 tp->t_softerror = error; 9988 return (error); 9989 case ENOBUFS: 9990 if (slot == 0) { 9991 /* 9992 * Pace us right away to retry in a some 9993 * time 9994 */ 9995 slot = 1 + rack->rc_enobuf; 9996 if (rack->rc_enobuf < 255) 9997 rack->rc_enobuf++; 9998 if (slot > (rack->rc_rack_rtt / 2)) { 9999 slot = rack->rc_rack_rtt / 2; 10000 } 10001 if (slot < 10) 10002 slot = 10; 10003 } 10004 counter_u64_add(rack_saw_enobuf, 1); 10005 error = 0; 10006 goto enobufs; 10007 case EMSGSIZE: 10008 /* 10009 * For some reason the interface we used initially 10010 * to send segments changed to another or lowered 10011 * its MTU. If TSO was active we either got an 10012 * interface without TSO capabilits or TSO was 10013 * turned off. If we obtained mtu from ip_output() 10014 * then update it and try again. 
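 * If no new mtu was learned we simply rearm the hpts timer and
 * return the error.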
10015 */ 10016 if (tso) 10017 tp->t_flags &= ~TF_TSO; 10018 if (mtu != 0) { 10019 tcp_mss_update(tp, -1, mtu, NULL, NULL); 10020 goto again; 10021 } 10022 slot = 10; 10023 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 10024 tp->t_flags &= ~TF_FORCEDATA; 10025 return (error); 10026 case ENETUNREACH: 10027 counter_u64_add(rack_saw_enetunreach, 1); 10028 case EHOSTDOWN: 10029 case EHOSTUNREACH: 10030 case ENETDOWN: 10031 if (TCPS_HAVERCVDSYN(tp->t_state)) { 10032 tp->t_softerror = error; 10033 } 10034 /* FALLTHROUGH */ 10035 default: 10036 slot = 10; 10037 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 10038 tp->t_flags &= ~TF_FORCEDATA; 10039 return (error); 10040 } 10041 } else { 10042 rack->rc_enobuf = 0; 10043 } 10044 TCPSTAT_INC(tcps_sndtotal); 10045 10046 /* 10047 * Data sent (as far as we can tell). If this advertises a larger 10048 * window than any other segment, then remember the size of the 10049 * advertised window. Any pending ACK has now been sent. 10050 */ 10051 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 10052 tp->rcv_adv = tp->rcv_nxt + recwin; 10053 tp->last_ack_sent = tp->rcv_nxt; 10054 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 10055 enobufs: 10056 rack->r_tlp_running = 0; 10057 if (flags & TH_RST) { 10058 /* 10059 * We don't send again after sending a RST. 10060 */ 10061 slot = 0; 10062 sendalot = 0; 10063 } 10064 if (rsm && (slot == 0)) { 10065 /* 10066 * Dup ack retransmission possibly, so 10067 * lets assure we have at least min rack 10068 * time, if its a rack resend then the rack 10069 * to will also be set to this. 10070 */ 10071 slot = rack->r_ctl.rc_min_to; 10072 } 10073 if (slot) { 10074 /* set the rack tcb into the slot N */ 10075 counter_u64_add(rack_paced_segments, 1); 10076 } else if (sendalot) { 10077 if (len) 10078 counter_u64_add(rack_unpaced_segments, 1); 10079 sack_rxmit = 0; 10080 tp->t_flags &= ~TF_FORCEDATA; 10081 goto again; 10082 } else if (len) { 10083 counter_u64_add(rack_unpaced_segments, 1); 10084 } 10085 tp->t_flags &= ~TF_FORCEDATA; 10086 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 10087 return (error); 10088 } 10089 10090 /* 10091 * rack_ctloutput() must drop the inpcb lock before performing copyin on 10092 * socket option arguments. When it re-acquires the lock after the copy, it 10093 * has to revalidate that the connection is still valid for the socket 10094 * option. 
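 * Concretely, rack_set_sockopt() below does INP_WUNLOCK(),
 * sooptcopyin(), INP_WLOCK(), and then checks INP_TIMEWAIT and
 * INP_DROPPED, returning ECONNRESET if the connection went away
 * while the lock was dropped.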
10095 */ 10096 static int 10097 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 10098 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 10099 { 10100 int32_t error = 0, optval; 10101 10102 switch (sopt->sopt_name) { 10103 case TCP_RACK_PROP_RATE: 10104 case TCP_RACK_PROP: 10105 case TCP_RACK_TLP_REDUCE: 10106 case TCP_RACK_EARLY_RECOV: 10107 case TCP_RACK_PACE_ALWAYS: 10108 case TCP_DELACK: 10109 case TCP_RACK_PACE_REDUCE: 10110 case TCP_RACK_PACE_MAX_SEG: 10111 case TCP_RACK_PRR_SENDALOT: 10112 case TCP_RACK_MIN_TO: 10113 case TCP_RACK_EARLY_SEG: 10114 case TCP_RACK_REORD_THRESH: 10115 case TCP_RACK_REORD_FADE: 10116 case TCP_RACK_TLP_THRESH: 10117 case TCP_RACK_PKT_DELAY: 10118 case TCP_RACK_TLP_USE: 10119 case TCP_RACK_TLP_INC_VAR: 10120 case TCP_RACK_IDLE_REDUCE_HIGH: 10121 case TCP_RACK_MIN_PACE: 10122 case TCP_RACK_GP_INCREASE: 10123 case TCP_BBR_RACK_RTT_USE: 10124 case TCP_BBR_USE_RACK_CHEAT: 10125 case TCP_RACK_DO_DETECTION: 10126 case TCP_DATA_AFTER_CLOSE: 10127 break; 10128 default: 10129 return (tcp_default_ctloutput(so, sopt, inp, tp)); 10130 break; 10131 } 10132 INP_WUNLOCK(inp); 10133 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 10134 if (error) 10135 return (error); 10136 INP_WLOCK(inp); 10137 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 10138 INP_WUNLOCK(inp); 10139 return (ECONNRESET); 10140 } 10141 tp = intotcpcb(inp); 10142 rack = (struct tcp_rack *)tp->t_fb_ptr; 10143 switch (sopt->sopt_name) { 10144 case TCP_RACK_DO_DETECTION: 10145 RACK_OPTS_INC(tcp_rack_no_sack); 10146 if (optval == 0) 10147 rack->do_detection = 0; 10148 else 10149 rack->do_detection = 1; 10150 break; 10151 case TCP_RACK_PROP_RATE: 10152 if ((optval <= 0) || (optval >= 100)) { 10153 error = EINVAL; 10154 break; 10155 } 10156 RACK_OPTS_INC(tcp_rack_prop_rate); 10157 rack->r_ctl.rc_prop_rate = optval; 10158 break; 10159 case TCP_RACK_TLP_USE: 10160 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 10161 error = EINVAL; 10162 break; 10163 } 10164 RACK_OPTS_INC(tcp_tlp_use); 10165 rack->rack_tlp_threshold_use = optval; 10166 break; 10167 case TCP_RACK_PROP: 10168 /* RACK proportional rate reduction (bool) */ 10169 RACK_OPTS_INC(tcp_rack_prop); 10170 rack->r_ctl.rc_prop_reduce = optval; 10171 break; 10172 case TCP_RACK_TLP_REDUCE: 10173 /* RACK TLP cwnd reduction (bool) */ 10174 RACK_OPTS_INC(tcp_rack_tlp_reduce); 10175 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 10176 break; 10177 case TCP_RACK_EARLY_RECOV: 10178 /* Should recovery happen early (bool) */ 10179 RACK_OPTS_INC(tcp_rack_early_recov); 10180 rack->r_ctl.rc_early_recovery = optval; 10181 break; 10182 case TCP_RACK_PACE_ALWAYS: 10183 /* Use the always pace method (bool) */ 10184 RACK_OPTS_INC(tcp_rack_pace_always); 10185 if (optval > 0) 10186 rack->rc_always_pace = 1; 10187 else 10188 rack->rc_always_pace = 0; 10189 break; 10190 case TCP_RACK_PACE_REDUCE: 10191 /* RACK Hptsi reduction factor (divisor) */ 10192 RACK_OPTS_INC(tcp_rack_pace_reduce); 10193 if (optval) 10194 /* Must be non-zero */ 10195 rack->rc_pace_reduce = optval; 10196 else 10197 error = EINVAL; 10198 break; 10199 case TCP_RACK_PACE_MAX_SEG: 10200 /* Max segments in a pace */ 10201 RACK_OPTS_INC(tcp_rack_max_seg); 10202 rack->rc_pace_max_segs = optval; 10203 rack_set_pace_segments(tp, rack); 10204 break; 10205 case TCP_RACK_PRR_SENDALOT: 10206 /* Allow PRR to send more than one seg */ 10207 RACK_OPTS_INC(tcp_rack_prr_sendalot); 10208 rack->r_ctl.rc_prr_sendalot = optval; 10209 break; 10210 case TCP_RACK_MIN_TO: 10211 /* 
Minimum time between rack t-o's in ms */ 10212 RACK_OPTS_INC(tcp_rack_min_to); 10213 rack->r_ctl.rc_min_to = optval; 10214 break; 10215 case TCP_RACK_EARLY_SEG: 10216 /* If early recovery max segments */ 10217 RACK_OPTS_INC(tcp_rack_early_seg); 10218 rack->r_ctl.rc_early_recovery_segs = optval; 10219 break; 10220 case TCP_RACK_REORD_THRESH: 10221 /* RACK reorder threshold (shift amount) */ 10222 RACK_OPTS_INC(tcp_rack_reord_thresh); 10223 if ((optval > 0) && (optval < 31)) 10224 rack->r_ctl.rc_reorder_shift = optval; 10225 else 10226 error = EINVAL; 10227 break; 10228 case TCP_RACK_REORD_FADE: 10229 /* Does reordering fade after ms time */ 10230 RACK_OPTS_INC(tcp_rack_reord_fade); 10231 rack->r_ctl.rc_reorder_fade = optval; 10232 break; 10233 case TCP_RACK_TLP_THRESH: 10234 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 10235 RACK_OPTS_INC(tcp_rack_tlp_thresh); 10236 if (optval) 10237 rack->r_ctl.rc_tlp_threshold = optval; 10238 else 10239 error = EINVAL; 10240 break; 10241 case TCP_BBR_USE_RACK_CHEAT: 10242 RACK_OPTS_INC(tcp_rack_cheat); 10243 if (optval) 10244 rack->use_rack_cheat = 1; 10245 else 10246 rack->use_rack_cheat = 0; 10247 break; 10248 case TCP_RACK_PKT_DELAY: 10249 /* RACK added ms i.e. rack-rtt + reord + N */ 10250 RACK_OPTS_INC(tcp_rack_pkt_delay); 10251 rack->r_ctl.rc_pkt_delay = optval; 10252 break; 10253 case TCP_RACK_TLP_INC_VAR: 10254 /* Does TLP include rtt variance in t-o */ 10255 error = EINVAL; 10256 break; 10257 case TCP_RACK_IDLE_REDUCE_HIGH: 10258 error = EINVAL; 10259 break; 10260 case TCP_DELACK: 10261 if (optval == 0) 10262 tp->t_delayed_ack = 0; 10263 else 10264 tp->t_delayed_ack = 1; 10265 if (tp->t_flags & TF_DELACK) { 10266 tp->t_flags &= ~TF_DELACK; 10267 tp->t_flags |= TF_ACKNOW; 10268 rack_output(tp); 10269 } 10270 break; 10271 case TCP_RACK_MIN_PACE: 10272 RACK_OPTS_INC(tcp_rack_min_pace); 10273 if (optval > 3) 10274 rack->r_enforce_min_pace = 3; 10275 else 10276 rack->r_enforce_min_pace = optval; 10277 break; 10278 case TCP_RACK_GP_INCREASE: 10279 if ((optval >= 0) && 10280 (optval <= 256)) 10281 rack->rack_per_of_gp = optval; 10282 else 10283 error = EINVAL; 10284 10285 break; 10286 case TCP_BBR_RACK_RTT_USE: 10287 if ((optval != USE_RTT_HIGH) && 10288 (optval != USE_RTT_LOW) && 10289 (optval != USE_RTT_AVG)) 10290 error = EINVAL; 10291 else 10292 rack->r_ctl.rc_rate_sample_method = optval; 10293 break; 10294 case TCP_DATA_AFTER_CLOSE: 10295 if (optval) 10296 rack->rc_allow_data_af_clo = 1; 10297 else 10298 rack->rc_allow_data_af_clo = 0; 10299 break; 10300 default: 10301 return (tcp_default_ctloutput(so, sopt, inp, tp)); 10302 break; 10303 } 10304 #ifdef NETFLIX_STATS 10305 tcp_log_socket_option(tp, sopt->sopt_name, optval, error); 10306 #endif 10307 INP_WUNLOCK(inp); 10308 return (error); 10309 } 10310 10311 static int 10312 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 10313 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 10314 { 10315 int32_t error, optval; 10316 10317 /* 10318 * Because all our options are either boolean or an int, we can just 10319 * pull everything into optval and then unlock and copy. If we ever 10320 * add a option that is not a int, then this will have quite an 10321 * impact to this routine. 
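 * Every case below only loads optval; the single sooptcopyout()
 * at the end runs after INP_WUNLOCK(), so no copyout is done with
 * the inpcb lock held.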
10322 */ 10323 error = 0; 10324 switch (sopt->sopt_name) { 10325 case TCP_RACK_DO_DETECTION: 10326 optval = rack->do_detection; 10327 break; 10328 10329 case TCP_RACK_PROP_RATE: 10330 optval = rack->r_ctl.rc_prop_rate; 10331 break; 10332 case TCP_RACK_PROP: 10333 /* RACK proportional rate reduction (bool) */ 10334 optval = rack->r_ctl.rc_prop_reduce; 10335 break; 10336 case TCP_RACK_TLP_REDUCE: 10337 /* RACK TLP cwnd reduction (bool) */ 10338 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 10339 break; 10340 case TCP_RACK_EARLY_RECOV: 10341 /* Should recovery happen early (bool) */ 10342 optval = rack->r_ctl.rc_early_recovery; 10343 break; 10344 case TCP_RACK_PACE_REDUCE: 10345 /* RACK Hptsi reduction factor (divisor) */ 10346 optval = rack->rc_pace_reduce; 10347 break; 10348 case TCP_RACK_PACE_MAX_SEG: 10349 /* Max segments in a pace */ 10350 optval = rack->rc_pace_max_segs; 10351 break; 10352 case TCP_RACK_PACE_ALWAYS: 10353 /* Use the always pace method */ 10354 optval = rack->rc_always_pace; 10355 break; 10356 case TCP_RACK_PRR_SENDALOT: 10357 /* Allow PRR to send more than one seg */ 10358 optval = rack->r_ctl.rc_prr_sendalot; 10359 break; 10360 case TCP_RACK_MIN_TO: 10361 /* Minimum time between rack t-o's in ms */ 10362 optval = rack->r_ctl.rc_min_to; 10363 break; 10364 case TCP_RACK_EARLY_SEG: 10365 /* If early recovery max segments */ 10366 optval = rack->r_ctl.rc_early_recovery_segs; 10367 break; 10368 case TCP_RACK_REORD_THRESH: 10369 /* RACK reorder threshold (shift amount) */ 10370 optval = rack->r_ctl.rc_reorder_shift; 10371 break; 10372 case TCP_RACK_REORD_FADE: 10373 /* Does reordering fade after ms time */ 10374 optval = rack->r_ctl.rc_reorder_fade; 10375 break; 10376 case TCP_BBR_USE_RACK_CHEAT: 10377 /* Do we use the rack cheat for rxt */ 10378 optval = rack->use_rack_cheat; 10379 break; 10380 case TCP_RACK_TLP_THRESH: 10381 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 10382 optval = rack->r_ctl.rc_tlp_threshold; 10383 break; 10384 case TCP_RACK_PKT_DELAY: 10385 /* RACK added ms i.e. rack-rtt + reord + N */ 10386 optval = rack->r_ctl.rc_pkt_delay; 10387 break; 10388 case TCP_RACK_TLP_USE: 10389 optval = rack->rack_tlp_threshold_use; 10390 break; 10391 case TCP_RACK_TLP_INC_VAR: 10392 /* Does TLP include rtt variance in t-o */ 10393 error = EINVAL; 10394 break; 10395 case TCP_RACK_IDLE_REDUCE_HIGH: 10396 error = EINVAL; 10397 break; 10398 case TCP_RACK_MIN_PACE: 10399 optval = rack->r_enforce_min_pace; 10400 break; 10401 case TCP_RACK_GP_INCREASE: 10402 optval = rack->rack_per_of_gp; 10403 break; 10404 case TCP_BBR_RACK_RTT_USE: 10405 optval = rack->r_ctl.rc_rate_sample_method; 10406 break; 10407 case TCP_DELACK: 10408 optval = tp->t_delayed_ack; 10409 break; 10410 case TCP_DATA_AFTER_CLOSE: 10411 optval = rack->rc_allow_data_af_clo; 10412 break; 10413 default: 10414 return (tcp_default_ctloutput(so, sopt, inp, tp)); 10415 break; 10416 } 10417 INP_WUNLOCK(inp); 10418 if (error == 0) { 10419 error = sooptcopyout(sopt, &optval, sizeof optval); 10420 } 10421 return (error); 10422 } 10423 10424 static int 10425 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp) 10426 { 10427 int32_t error = EINVAL; 10428 struct tcp_rack *rack; 10429 10430 rack = (struct tcp_rack *)tp->t_fb_ptr; 10431 if (rack == NULL) { 10432 /* Huh? 
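 * No rack state hangs off this tcpcb; just drop the lock and
 * return EINVAL.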
*/ 10433 goto out; 10434 } 10435 if (sopt->sopt_dir == SOPT_SET) { 10436 return (rack_set_sockopt(so, sopt, inp, tp, rack)); 10437 } else if (sopt->sopt_dir == SOPT_GET) { 10438 return (rack_get_sockopt(so, sopt, inp, tp, rack)); 10439 } 10440 out: 10441 INP_WUNLOCK(inp); 10442 return (error); 10443 } 10444 10445 10446 static struct tcp_function_block __tcp_rack = { 10447 .tfb_tcp_block_name = __XSTRING(STACKNAME), 10448 .tfb_tcp_output = rack_output, 10449 .tfb_do_queued_segments = ctf_do_queued_segments, 10450 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 10451 .tfb_tcp_do_segment = rack_do_segment, 10452 .tfb_tcp_ctloutput = rack_ctloutput, 10453 .tfb_tcp_fb_init = rack_init, 10454 .tfb_tcp_fb_fini = rack_fini, 10455 .tfb_tcp_timer_stop_all = rack_stopall, 10456 .tfb_tcp_timer_activate = rack_timer_activate, 10457 .tfb_tcp_timer_active = rack_timer_active, 10458 .tfb_tcp_timer_stop = rack_timer_stop, 10459 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 10460 .tfb_tcp_handoff_ok = rack_handoff_ok 10461 }; 10462 10463 static const char *rack_stack_names[] = { 10464 __XSTRING(STACKNAME), 10465 #ifdef STACKALIAS 10466 __XSTRING(STACKALIAS), 10467 #endif 10468 }; 10469 10470 static int 10471 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 10472 { 10473 memset(mem, 0, size); 10474 return (0); 10475 } 10476 10477 static void 10478 rack_dtor(void *mem, int32_t size, void *arg) 10479 { 10480 10481 } 10482 10483 static bool rack_mod_inited = false; 10484 10485 static int 10486 tcp_addrack(module_t mod, int32_t type, void *data) 10487 { 10488 int32_t err = 0; 10489 int num_stacks; 10490 10491 switch (type) { 10492 case MOD_LOAD: 10493 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 10494 sizeof(struct rack_sendmap), 10495 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 10496 10497 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 10498 sizeof(struct tcp_rack), 10499 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 10500 10501 sysctl_ctx_init(&rack_sysctl_ctx); 10502 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 10503 SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 10504 OID_AUTO, 10505 #ifdef STACKALIAS 10506 __XSTRING(STACKALIAS), 10507 #else 10508 __XSTRING(STACKNAME), 10509 #endif 10510 CTLFLAG_RW, 0, 10511 ""); 10512 if (rack_sysctl_root == NULL) { 10513 printf("Failed to add sysctl node\n"); 10514 err = EFAULT; 10515 goto free_uma; 10516 } 10517 rack_init_sysctls(); 10518 num_stacks = nitems(rack_stack_names); 10519 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 10520 rack_stack_names, &num_stacks); 10521 if (err) { 10522 printf("Failed to register %s stack name for " 10523 "%s module\n", rack_stack_names[num_stacks], 10524 __XSTRING(MODNAME)); 10525 sysctl_ctx_free(&rack_sysctl_ctx); 10526 free_uma: 10527 uma_zdestroy(rack_zone); 10528 uma_zdestroy(rack_pcb_zone); 10529 rack_counter_destroy(); 10530 printf("Failed to register rack module -- err:%d\n", err); 10531 return (err); 10532 } 10533 tcp_lro_reg_mbufq(); 10534 rack_mod_inited = true; 10535 break; 10536 case MOD_QUIESCE: 10537 err = deregister_tcp_functions(&__tcp_rack, true, false); 10538 break; 10539 case MOD_UNLOAD: 10540 err = deregister_tcp_functions(&__tcp_rack, false, true); 10541 if (err == EBUSY) 10542 break; 10543 if (rack_mod_inited) { 10544 uma_zdestroy(rack_zone); 10545 uma_zdestroy(rack_pcb_zone); 10546 sysctl_ctx_free(&rack_sysctl_ctx); 10547 rack_counter_destroy(); 10548 rack_mod_inited = false; 10549 } 10550 tcp_lro_dereg_mbufq(); 10551 err = 0; 10552 break; 10553 default: 10554 
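		/* Any other module event type is not supported. */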
return (EOPNOTSUPP); 10555 } 10556 return (err); 10557 } 10558 10559 static moduledata_t tcp_rack = { 10560 .name = __XSTRING(MODNAME), 10561 .evhand = tcp_addrack, 10562 .priv = 0 10563 }; 10564 10565 MODULE_VERSION(MODNAME, 1); 10566 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 10567 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 10568