/*-
 * Copyright (c) 2016-2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef NETFLIX_STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#endif
#include <sys/refcount.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then to assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
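
/*
 * Illustrative sketch only (not part of the stack): one way to picture the
 * per-state decomposition described above is as a table of handlers indexed
 * by the TCP FSM state, with the top-level do_segment routine validating
 * SACK support and the RACK state before dispatching.  The names below
 * (rack_state_handler_t, example_handlers) are hypothetical and exist only
 * to show the idea; the real handlers are the rack_do_* functions declared
 * later in this file.
 *
 *	typedef int (*rack_state_handler_t)(struct mbuf *m, struct tcphdr *th,
 *	    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
 *	    int32_t drop_hdrlen, int32_t tlen, uint32_t tiwin,
 *	    int32_t thflags, int32_t nxt_pkt);
 *
 *	static rack_state_handler_t example_handlers[TCP_NSTATES] = {
 *		[TCPS_SYN_SENT]     = rack_do_syn_sent,
 *		[TCPS_SYN_RECEIVED] = rack_do_syn_recv,
 *		[TCPS_ESTABLISHED]  = rack_do_established,
 *		[TCPS_CLOSE_WAIT]   = rack_do_close_wait,
 *		[TCPS_FIN_WAIT_1]   = rack_do_fin_wait_1,
 *		[TCPS_CLOSING]      = rack_do_closing,
 *		[TCPS_LAST_ACK]     = rack_do_lastack,
 *		[TCPS_FIN_WAIT_2]   = rack_do_fin_wait_2,
 *	};
 */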
static int32_t rack_tlp_thresh = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000;	/* 0 - never fade, def 60,000 - 60 seconds */
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;

static int32_t rack_pkt_delay = 1;
static int32_t rack_min_pace_time = 0;
static int32_t rack_early_recovery = 1;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1;		/* Number of ms minimum timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t use_rack_cheat = 1;
static int32_t rack_persist_min = 250;	/* 250ms */
static int32_t rack_persist_max = 1000;	/* 1 Second */
static int32_t rack_sack_not_required = 0;	/* set to one to allow non-sack to use rack */
static int32_t rack_hw_tls_max_seg = 0;	/* 0 means use hw-tls single segment */

/* Sack attack detection thresholds and such */
static int32_t tcp_force_detection = 0;

#ifdef NETFLIX_EXP_DETECTION
static int32_t tcp_sack_to_ack_thresh = 700;	/* 70 % */
static int32_t tcp_sack_to_move_thresh = 600;	/* 60 % */
static int32_t tcp_restoral_thresh = 650;	/* 65 % (sack:2:ack -5%) */
static int32_t tcp_attack_on_turns_on_logging = 0;
static int32_t tcp_map_minimum = 500;
#endif
static int32_t tcp_sad_decay_val = 800;
static int32_t tcp_sad_pacing_interval = 2000;
static int32_t tcp_sad_low_pps = 100;


/*
 * Currently regular tcp has a rto_min of 30ms; the backoff doubles
 * 12 times, so that ends up being a total of
 * 30 ms * (2^12 - 1) = 30 ms * 4095 = 122.850 seconds before a
 * connection is killed.
 */
static int32_t rack_tlp_min = 10;
static int32_t rack_rto_min = 30;	/* 30ms same as main freebsd */
static int32_t rack_rto_max = 4000;	/* 4 seconds */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 200;	/* 200ms */
static int32_t rack_slot_reduction = 4;
static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_use_proportional_reduce = 0;
static int32_t rack_proportional_rate = 10;
static int32_t rack_tlp_max_resend = 2;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_use_sack_filter = 1;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;
static int32_t rack_per_of_gp = 50;
static int32_t rack_tcp_map_entries_limit = 1500;
static int32_t rack_tcp_map_split_limit = 256;


/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_calc_zero;
counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_per_timer_hole;

/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_used_tlpmethod;
counter_u64_t rack_used_tlpmethod2;
counter_u64_t rack_enter_tlp_calc;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_tlp_does_nada;

/* Counters for HW TLS */
counter_u64_t rack_tls_rwnd;
counter_u64_t rack_tls_cwnd;
counter_u64_t rack_tls_app;
counter_u64_t rack_tls_other;
counter_u64_t rack_tls_filled;
counter_u64_t rack_tls_rxt;
counter_u64_t rack_tls_tlp;

/* Temp CPU counters */
counter_u64_t rack_find_high;

counter_u64_t rack_progress_drops;
counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    struct tcphdr *th, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp, struct tcphdr *th,
    uint32_t type);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm,
    uint32_t t, uint32_t cts);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts,
    uint8_t pass, struct rack_sendmap *hintrsm);
static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, int num);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void rack_remxt_tmr(struct tcpcb *tp);
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t delta);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t ts, int32_t * lenp);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t ts);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt);
static void
tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th);

int32_t rack_clear_counter = 0;


static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return error;

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
		counter_u64_zero(rack_badfr);
		counter_u64_zero(rack_badfr_bytes);
		counter_u64_zero(rack_rtm_prr_retran);
		counter_u64_zero(rack_rtm_prr_newdata);
		counter_u64_zero(rack_timestamp_mismatch);
		counter_u64_zero(rack_reorder_seen);
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_tlp_retran_fail);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_to_arm_rack);
		counter_u64_zero(rack_to_arm_tlp);
		counter_u64_zero(rack_paced_segments);
		counter_u64_zero(rack_calc_zero);
		counter_u64_zero(rack_calc_nonzero);
		counter_u64_zero(rack_unpaced_segments);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_per_timer_hole);
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_to_alloc_limited);
		counter_u64_zero(rack_alloc_limited_conns);
		counter_u64_zero(rack_split_limited);
		counter_u64_zero(rack_find_high);
		counter_u64_zero(rack_tls_rwnd);
		counter_u64_zero(rack_tls_cwnd);
		counter_u64_zero(rack_tls_app);
		counter_u64_zero(rack_tls_other);
		counter_u64_zero(rack_tls_filled);
		counter_u64_zero(rack_tls_rxt);
		counter_u64_zero(rack_tls_tlp);
		counter_u64_zero(rack_sack_attacks_detected);
		counter_u64_zero(rack_sack_attacks_reversed);
		counter_u64_zero(rack_sack_used_next_merge);
		counter_u64_zero(rack_sack_used_prev_merge);
		counter_u64_zero(rack_sack_splits);
		counter_u64_zero(rack_sack_skipped_acked);
		counter_u64_zero(rack_ack_total);
		counter_u64_zero(rack_express_sack);
		counter_u64_zero(rack_sack_total);
		counter_u64_zero(rack_move_none);
		counter_u64_zero(rack_move_some);
		counter_u64_zero(rack_used_tlpmethod);
		counter_u64_zero(rack_used_tlpmethod2);
		counter_u64_zero(rack_enter_tlp_calc);
		counter_u64_zero(rack_progress_drops);
		counter_u64_zero(rack_tlp_does_nada);
		counter_u64_zero(rack_collapsed_win);
	}
	rack_clear_counter = 0;
	return (0);
}
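
/*
 * Usage sketch (illustrative only): the handler above is registered near
 * the bottom of rack_init_sysctls() as the "clear" node under
 * rack_sysctl_root.  Assuming that root is attached as net.inet.tcp.rack
 * (the exact path is established elsewhere when the stack registers), a
 * userland program could zero every stack-wide counter like this:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	clear_rack_counters(void)
 *	{
 *		unsigned int one = 1;
 *
 *		// Writing 1 trips the counter_u64_zero() calls above.
 *		return (sysctlbyname("net.inet.tcp.rack.clear",
 *		    NULL, NULL, &one, sizeof(one)));
 *	}
 */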
"); 532 SYSCTL_ADD_S32(&rack_sysctl_ctx, 533 SYSCTL_CHILDREN(rack_sysctl_root), 534 OID_AUTO, "data_after_close", CTLFLAG_RW, 535 &rack_ignore_data_after_close, 0, 536 "Do we hold off sending a RST until all pending data is ack'd"); 537 SYSCTL_ADD_S32(&rack_sysctl_ctx, 538 SYSCTL_CHILDREN(rack_sysctl_root), 539 OID_AUTO, "cheat_rxt", CTLFLAG_RW, 540 &use_rack_cheat, 1, 541 "Do we use the rxt cheat for rack?"); 542 543 SYSCTL_ADD_U32(&rack_sysctl_ctx, 544 SYSCTL_CHILDREN(rack_sysctl_root), 545 OID_AUTO, "persmin", CTLFLAG_RW, 546 &rack_persist_min, 250, 547 "What is the minimum time in milliseconds between persists"); 548 SYSCTL_ADD_U32(&rack_sysctl_ctx, 549 SYSCTL_CHILDREN(rack_sysctl_root), 550 OID_AUTO, "persmax", CTLFLAG_RW, 551 &rack_persist_max, 1000, 552 "What is the largest delay in milliseconds between persists"); 553 SYSCTL_ADD_S32(&rack_sysctl_ctx, 554 SYSCTL_CHILDREN(rack_sysctl_root), 555 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 556 &rack_sack_not_required, 0, 557 "Do we allow rack to run on connections not supporting SACK?"); 558 SYSCTL_ADD_S32(&rack_sysctl_ctx, 559 SYSCTL_CHILDREN(rack_sysctl_root), 560 OID_AUTO, "tlpmethod", CTLFLAG_RW, 561 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 562 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 563 SYSCTL_ADD_S32(&rack_sysctl_ctx, 564 SYSCTL_CHILDREN(rack_sysctl_root), 565 OID_AUTO, "gp_percentage", CTLFLAG_RW, 566 &rack_per_of_gp, 50, 567 "Do we pace to percentage of goodput (0=old method)?"); 568 SYSCTL_ADD_S32(&rack_sysctl_ctx, 569 SYSCTL_CHILDREN(rack_sysctl_root), 570 OID_AUTO, "min_pace_time", CTLFLAG_RW, 571 &rack_min_pace_time, 0, 572 "Should we enforce a minimum pace time of 1ms"); 573 SYSCTL_ADD_S32(&rack_sysctl_ctx, 574 SYSCTL_CHILDREN(rack_sysctl_root), 575 OID_AUTO, "bb_verbose", CTLFLAG_RW, 576 &rack_verbose_logging, 0, 577 "Should RACK black box logging be verbose"); 578 SYSCTL_ADD_S32(&rack_sysctl_ctx, 579 SYSCTL_CHILDREN(rack_sysctl_root), 580 OID_AUTO, "sackfiltering", CTLFLAG_RW, 581 &rack_use_sack_filter, 1, 582 "Do we use sack filtering?"); 583 SYSCTL_ADD_S32(&rack_sysctl_ctx, 584 SYSCTL_CHILDREN(rack_sysctl_root), 585 OID_AUTO, "delayed_ack", CTLFLAG_RW, 586 &rack_delayed_ack_time, 200, 587 "Delayed ack time (200ms)"); 588 SYSCTL_ADD_S32(&rack_sysctl_ctx, 589 SYSCTL_CHILDREN(rack_sysctl_root), 590 OID_AUTO, "tlpminto", CTLFLAG_RW, 591 &rack_tlp_min, 10, 592 "TLP minimum timeout per the specification (10ms)"); 593 SYSCTL_ADD_S32(&rack_sysctl_ctx, 594 SYSCTL_CHILDREN(rack_sysctl_root), 595 OID_AUTO, "send_oldest", CTLFLAG_RW, 596 &rack_always_send_oldest, 1, 597 "Should we always send the oldest TLP and RACK-TLP"); 598 SYSCTL_ADD_S32(&rack_sysctl_ctx, 599 SYSCTL_CHILDREN(rack_sysctl_root), 600 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 601 &rack_limited_retran, 0, 602 "How many times can a rack timeout drive out sends"); 603 SYSCTL_ADD_S32(&rack_sysctl_ctx, 604 SYSCTL_CHILDREN(rack_sysctl_root), 605 OID_AUTO, "minrto", CTLFLAG_RW, 606 &rack_rto_min, 0, 607 "Minimum RTO in ms -- set with caution below 1000 due to TLP"); 608 SYSCTL_ADD_S32(&rack_sysctl_ctx, 609 SYSCTL_CHILDREN(rack_sysctl_root), 610 OID_AUTO, "maxrto", CTLFLAG_RW, 611 &rack_rto_max, 0, 612 "Maxiumum RTO in ms -- should be at least as large as min_rto"); 613 SYSCTL_ADD_S32(&rack_sysctl_ctx, 614 SYSCTL_CHILDREN(rack_sysctl_root), 615 OID_AUTO, "tlp_retry", CTLFLAG_RW, 616 &rack_tlp_max_resend, 2, 617 "How many times does TLP retry a single segment or multiple with no ACK"); 618 SYSCTL_ADD_S32(&rack_sysctl_ctx, 619 
SYSCTL_CHILDREN(rack_sysctl_root), 620 OID_AUTO, "recovery_loss_prop", CTLFLAG_RW, 621 &rack_use_proportional_reduce, 0, 622 "Should we proportionaly reduce cwnd based on the number of losses "); 623 SYSCTL_ADD_S32(&rack_sysctl_ctx, 624 SYSCTL_CHILDREN(rack_sysctl_root), 625 OID_AUTO, "recovery_prop", CTLFLAG_RW, 626 &rack_proportional_rate, 10, 627 "What percent reduction per loss"); 628 SYSCTL_ADD_S32(&rack_sysctl_ctx, 629 SYSCTL_CHILDREN(rack_sysctl_root), 630 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 631 &rack_lower_cwnd_at_tlp, 0, 632 "When a TLP completes a retran should we enter recovery?"); 633 SYSCTL_ADD_S32(&rack_sysctl_ctx, 634 SYSCTL_CHILDREN(rack_sysctl_root), 635 OID_AUTO, "hptsi_reduces", CTLFLAG_RW, 636 &rack_slot_reduction, 4, 637 "When setting a slot should we reduce by divisor"); 638 SYSCTL_ADD_S32(&rack_sysctl_ctx, 639 SYSCTL_CHILDREN(rack_sysctl_root), 640 OID_AUTO, "hptsi_every_seg", CTLFLAG_RW, 641 &rack_pace_every_seg, 0, 642 "Should we use the original pacing mechanism that did not pace much?"); 643 SYSCTL_ADD_S32(&rack_sysctl_ctx, 644 SYSCTL_CHILDREN(rack_sysctl_root), 645 OID_AUTO, "hptsi_seg_max", CTLFLAG_RW, 646 &rack_hptsi_segments, 40, 647 "Should we pace out only a limited size of segments"); 648 SYSCTL_ADD_S32(&rack_sysctl_ctx, 649 SYSCTL_CHILDREN(rack_sysctl_root), 650 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 651 &rack_send_a_lot_in_prr, 1, 652 "Send a lot in prr"); 653 SYSCTL_ADD_S32(&rack_sysctl_ctx, 654 SYSCTL_CHILDREN(rack_sysctl_root), 655 OID_AUTO, "minto", CTLFLAG_RW, 656 &rack_min_to, 1, 657 "Minimum rack timeout in milliseconds"); 658 SYSCTL_ADD_S32(&rack_sysctl_ctx, 659 SYSCTL_CHILDREN(rack_sysctl_root), 660 OID_AUTO, "earlyrecovery", CTLFLAG_RW, 661 &rack_early_recovery, 1, 662 "Do we do early recovery with rack"); 663 SYSCTL_ADD_S32(&rack_sysctl_ctx, 664 SYSCTL_CHILDREN(rack_sysctl_root), 665 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 666 &rack_reorder_thresh, 2, 667 "What factor for rack will be added when seeing reordering (shift right)"); 668 SYSCTL_ADD_S32(&rack_sysctl_ctx, 669 SYSCTL_CHILDREN(rack_sysctl_root), 670 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 671 &rack_tlp_thresh, 1, 672 "what divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 673 SYSCTL_ADD_S32(&rack_sysctl_ctx, 674 SYSCTL_CHILDREN(rack_sysctl_root), 675 OID_AUTO, "reorder_fade", CTLFLAG_RW, 676 &rack_reorder_fade, 0, 677 "Does reorder detection fade, if so how many ms (0 means never)"); 678 SYSCTL_ADD_S32(&rack_sysctl_ctx, 679 SYSCTL_CHILDREN(rack_sysctl_root), 680 OID_AUTO, "pktdelay", CTLFLAG_RW, 681 &rack_pkt_delay, 1, 682 "Extra RACK time (in ms) besides reordering thresh"); 683 684 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 685 SYSCTL_CHILDREN(rack_sysctl_root), 686 OID_AUTO, 687 "stats", 688 CTLFLAG_RW, 0, 689 "Rack Counters"); 690 rack_badfr = counter_u64_alloc(M_WAITOK); 691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 692 SYSCTL_CHILDREN(rack_counters), 693 OID_AUTO, "badfr", CTLFLAG_RD, 694 &rack_badfr, "Total number of bad FRs"); 695 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 696 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 697 SYSCTL_CHILDREN(rack_counters), 698 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 699 &rack_badfr_bytes, "Total number of bad FRs"); 700 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 701 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 702 SYSCTL_CHILDREN(rack_counters), 703 OID_AUTO, "prrsndret", CTLFLAG_RD, 704 &rack_rtm_prr_retran, 705 "Total number of prr based retransmits"); 706 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 707 
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 708 SYSCTL_CHILDREN(rack_counters), 709 OID_AUTO, "prrsndnew", CTLFLAG_RD, 710 &rack_rtm_prr_newdata, 711 "Total number of prr based new transmits"); 712 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 713 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 714 SYSCTL_CHILDREN(rack_counters), 715 OID_AUTO, "tsnf", CTLFLAG_RD, 716 &rack_timestamp_mismatch, 717 "Total number of timestamps that we could not find the reported ts"); 718 rack_find_high = counter_u64_alloc(M_WAITOK); 719 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 720 SYSCTL_CHILDREN(rack_counters), 721 OID_AUTO, "findhigh", CTLFLAG_RD, 722 &rack_find_high, 723 "Total number of FIN causing find-high"); 724 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 725 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 726 SYSCTL_CHILDREN(rack_counters), 727 OID_AUTO, "reordering", CTLFLAG_RD, 728 &rack_reorder_seen, 729 "Total number of times we added delay due to reordering"); 730 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 732 SYSCTL_CHILDREN(rack_counters), 733 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 734 &rack_tlp_tot, 735 "Total number of tail loss probe expirations"); 736 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 738 SYSCTL_CHILDREN(rack_counters), 739 OID_AUTO, "tlp_new", CTLFLAG_RD, 740 &rack_tlp_newdata, 741 "Total number of tail loss probe sending new data"); 742 743 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 744 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 745 SYSCTL_CHILDREN(rack_counters), 746 OID_AUTO, "tlp_retran", CTLFLAG_RD, 747 &rack_tlp_retran, 748 "Total number of tail loss probe sending retransmitted data"); 749 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 750 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 751 SYSCTL_CHILDREN(rack_counters), 752 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 753 &rack_tlp_retran_bytes, 754 "Total bytes of tail loss probe sending retransmitted data"); 755 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 756 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 757 SYSCTL_CHILDREN(rack_counters), 758 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 759 &rack_tlp_retran_fail, 760 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 761 rack_to_tot = counter_u64_alloc(M_WAITOK); 762 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 763 SYSCTL_CHILDREN(rack_counters), 764 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 765 &rack_to_tot, 766 "Total number of times the rack to expired?"); 767 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 768 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 769 SYSCTL_CHILDREN(rack_counters), 770 OID_AUTO, "arm_rack", CTLFLAG_RD, 771 &rack_to_arm_rack, 772 "Total number of times the rack timer armed?"); 773 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 774 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 775 SYSCTL_CHILDREN(rack_counters), 776 OID_AUTO, "arm_tlp", CTLFLAG_RD, 777 &rack_to_arm_tlp, 778 "Total number of times the tlp timer armed?"); 779 780 rack_calc_zero = counter_u64_alloc(M_WAITOK); 781 rack_calc_nonzero = counter_u64_alloc(M_WAITOK); 782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 783 SYSCTL_CHILDREN(rack_counters), 784 OID_AUTO, "calc_zero", CTLFLAG_RD, 785 &rack_calc_zero, 786 "Total number of times pacing time worked out to zero?"); 787 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 788 SYSCTL_CHILDREN(rack_counters), 789 OID_AUTO, "calc_nonzero", CTLFLAG_RD, 790 &rack_calc_nonzero, 791 "Total number of times pacing time worked out to non-zero?"); 
792 rack_paced_segments = counter_u64_alloc(M_WAITOK); 793 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 794 SYSCTL_CHILDREN(rack_counters), 795 OID_AUTO, "paced", CTLFLAG_RD, 796 &rack_paced_segments, 797 "Total number of times a segment send caused hptsi"); 798 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 799 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 800 SYSCTL_CHILDREN(rack_counters), 801 OID_AUTO, "unpaced", CTLFLAG_RD, 802 &rack_unpaced_segments, 803 "Total number of times a segment did not cause hptsi"); 804 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 805 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 806 SYSCTL_CHILDREN(rack_counters), 807 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 808 &rack_saw_enobuf, 809 "Total number of times a segment did not cause hptsi"); 810 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 811 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 812 SYSCTL_CHILDREN(rack_counters), 813 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 814 &rack_saw_enetunreach, 815 "Total number of times a segment did not cause hptsi"); 816 rack_to_alloc = counter_u64_alloc(M_WAITOK); 817 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 818 SYSCTL_CHILDREN(rack_counters), 819 OID_AUTO, "allocs", CTLFLAG_RD, 820 &rack_to_alloc, 821 "Total allocations of tracking structures"); 822 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 823 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 824 SYSCTL_CHILDREN(rack_counters), 825 OID_AUTO, "allochard", CTLFLAG_RD, 826 &rack_to_alloc_hard, 827 "Total allocations done with sleeping the hard way"); 828 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 829 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 830 SYSCTL_CHILDREN(rack_counters), 831 OID_AUTO, "allocemerg", CTLFLAG_RD, 832 &rack_to_alloc_emerg, 833 "Total allocations done from emergency cache"); 834 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 835 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 836 SYSCTL_CHILDREN(rack_counters), 837 OID_AUTO, "alloc_limited", CTLFLAG_RD, 838 &rack_to_alloc_limited, 839 "Total allocations dropped due to limit"); 840 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 841 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 842 SYSCTL_CHILDREN(rack_counters), 843 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 844 &rack_alloc_limited_conns, 845 "Connections with allocations dropped due to limit"); 846 rack_split_limited = counter_u64_alloc(M_WAITOK); 847 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 848 SYSCTL_CHILDREN(rack_counters), 849 OID_AUTO, "split_limited", CTLFLAG_RD, 850 &rack_split_limited, 851 "Split allocations dropped due to limit"); 852 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 853 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 854 SYSCTL_CHILDREN(rack_counters), 855 OID_AUTO, "sack_long", CTLFLAG_RD, 856 &rack_sack_proc_all, 857 "Total times we had to walk whole list for sack processing"); 858 859 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 861 SYSCTL_CHILDREN(rack_counters), 862 OID_AUTO, "sack_restart", CTLFLAG_RD, 863 &rack_sack_proc_restart, 864 "Total times we had to walk whole list due to a restart"); 865 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 867 SYSCTL_CHILDREN(rack_counters), 868 OID_AUTO, "sack_short", CTLFLAG_RD, 869 &rack_sack_proc_short, 870 "Total times we took shortcut for sack processing"); 871 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK); 872 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 873 SYSCTL_CHILDREN(rack_counters), 874 OID_AUTO, "tlp_calc_entered", 
CTLFLAG_RD, 875 &rack_enter_tlp_calc, 876 "Total times we called calc-tlp"); 877 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK); 878 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 879 SYSCTL_CHILDREN(rack_counters), 880 OID_AUTO, "hit_tlp_method", CTLFLAG_RD, 881 &rack_used_tlpmethod, 882 "Total number of runt sacks"); 883 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK); 884 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 885 SYSCTL_CHILDREN(rack_counters), 886 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD, 887 &rack_used_tlpmethod2, 888 "Total number of times we hit TLP method 2"); 889 /* Sack Attacker detection stuff */ 890 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 891 SYSCTL_CHILDREN(rack_sysctl_root), 892 OID_AUTO, 893 "sack_attack", 894 CTLFLAG_RW, 0, 895 "Rack Sack Attack Counters and Controls"); 896 SYSCTL_ADD_U32(&rack_sysctl_ctx, 897 SYSCTL_CHILDREN(rack_attack), 898 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 899 &rack_highest_sack_thresh_seen, 0, 900 "Highest sack to ack ratio seen"); 901 SYSCTL_ADD_U32(&rack_sysctl_ctx, 902 SYSCTL_CHILDREN(rack_attack), 903 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 904 &rack_highest_move_thresh_seen, 0, 905 "Highest move to non-move ratio seen"); 906 rack_ack_total = counter_u64_alloc(M_WAITOK); 907 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 908 SYSCTL_CHILDREN(rack_attack), 909 OID_AUTO, "acktotal", CTLFLAG_RD, 910 &rack_ack_total, 911 "Total number of Ack's"); 912 913 rack_express_sack = counter_u64_alloc(M_WAITOK); 914 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 915 SYSCTL_CHILDREN(rack_attack), 916 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 917 &rack_express_sack, 918 "Total expresss number of Sack's"); 919 rack_sack_total = counter_u64_alloc(M_WAITOK); 920 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 921 SYSCTL_CHILDREN(rack_attack), 922 OID_AUTO, "sacktotal", CTLFLAG_RD, 923 &rack_sack_total, 924 "Total number of SACK's"); 925 rack_move_none = counter_u64_alloc(M_WAITOK); 926 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 927 SYSCTL_CHILDREN(rack_attack), 928 OID_AUTO, "move_none", CTLFLAG_RD, 929 &rack_move_none, 930 "Total number of SACK index reuse of postions under threshold"); 931 rack_move_some = counter_u64_alloc(M_WAITOK); 932 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 933 SYSCTL_CHILDREN(rack_attack), 934 OID_AUTO, "move_some", CTLFLAG_RD, 935 &rack_move_some, 936 "Total number of SACK index reuse of postions over threshold"); 937 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 938 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 939 SYSCTL_CHILDREN(rack_attack), 940 OID_AUTO, "attacks", CTLFLAG_RD, 941 &rack_sack_attacks_detected, 942 "Total number of SACK attackers that had sack disabled"); 943 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 944 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 945 SYSCTL_CHILDREN(rack_attack), 946 OID_AUTO, "reversed", CTLFLAG_RD, 947 &rack_sack_attacks_reversed, 948 "Total number of SACK attackers that were later determined false positive"); 949 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 950 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_attack), 952 OID_AUTO, "nextmerge", CTLFLAG_RD, 953 &rack_sack_used_next_merge, 954 "Total number of times we used the next merge"); 955 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 956 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 957 SYSCTL_CHILDREN(rack_attack), 958 OID_AUTO, "prevmerge", CTLFLAG_RD, 959 &rack_sack_used_prev_merge, 960 "Total number of times we used the prev merge"); 961 rack_sack_skipped_acked = 
counter_u64_alloc(M_WAITOK); 962 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 963 SYSCTL_CHILDREN(rack_attack), 964 OID_AUTO, "skipacked", CTLFLAG_RD, 965 &rack_sack_skipped_acked, 966 "Total number of times we skipped previously sacked"); 967 rack_sack_splits = counter_u64_alloc(M_WAITOK); 968 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 969 SYSCTL_CHILDREN(rack_attack), 970 OID_AUTO, "ofsplit", CTLFLAG_RD, 971 &rack_sack_splits, 972 "Total number of times we did the old fashion tree split"); 973 rack_progress_drops = counter_u64_alloc(M_WAITOK); 974 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 975 SYSCTL_CHILDREN(rack_counters), 976 OID_AUTO, "prog_drops", CTLFLAG_RD, 977 &rack_progress_drops, 978 "Total number of progress drops"); 979 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 980 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_counters), 982 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 983 &rack_input_idle_reduces, 984 "Total number of idle reductions on input"); 985 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 986 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 987 SYSCTL_CHILDREN(rack_counters), 988 OID_AUTO, "collapsed_win", CTLFLAG_RD, 989 &rack_collapsed_win, 990 "Total number of collapsed windows"); 991 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK); 992 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 993 SYSCTL_CHILDREN(rack_counters), 994 OID_AUTO, "tlp_nada", CTLFLAG_RD, 995 &rack_tlp_does_nada, 996 "Total number of nada tlp calls"); 997 998 rack_tls_rwnd = counter_u64_alloc(M_WAITOK); 999 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1000 SYSCTL_CHILDREN(rack_counters), 1001 OID_AUTO, "tls_rwnd", CTLFLAG_RD, 1002 &rack_tls_rwnd, 1003 "Total hdwr tls rwnd limited"); 1004 1005 rack_tls_cwnd = counter_u64_alloc(M_WAITOK); 1006 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1007 SYSCTL_CHILDREN(rack_counters), 1008 OID_AUTO, "tls_cwnd", CTLFLAG_RD, 1009 &rack_tls_cwnd, 1010 "Total hdwr tls cwnd limited"); 1011 1012 rack_tls_app = counter_u64_alloc(M_WAITOK); 1013 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1014 SYSCTL_CHILDREN(rack_counters), 1015 OID_AUTO, "tls_app", CTLFLAG_RD, 1016 &rack_tls_app, 1017 "Total hdwr tls app limited"); 1018 1019 rack_tls_other = counter_u64_alloc(M_WAITOK); 1020 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1021 SYSCTL_CHILDREN(rack_counters), 1022 OID_AUTO, "tls_other", CTLFLAG_RD, 1023 &rack_tls_other, 1024 "Total hdwr tls other limited"); 1025 1026 rack_tls_filled = counter_u64_alloc(M_WAITOK); 1027 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_counters), 1029 OID_AUTO, "tls_filled", CTLFLAG_RD, 1030 &rack_tls_filled, 1031 "Total hdwr tls filled"); 1032 1033 rack_tls_rxt = counter_u64_alloc(M_WAITOK); 1034 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1035 SYSCTL_CHILDREN(rack_counters), 1036 OID_AUTO, "tls_rxt", CTLFLAG_RD, 1037 &rack_tls_rxt, 1038 "Total hdwr rxt"); 1039 1040 rack_tls_tlp = counter_u64_alloc(M_WAITOK); 1041 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1042 SYSCTL_CHILDREN(rack_counters), 1043 OID_AUTO, "tls_tlp", CTLFLAG_RD, 1044 &rack_tls_tlp, 1045 "Total hdwr tls tlp"); 1046 rack_per_timer_hole = counter_u64_alloc(M_WAITOK); 1047 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1048 SYSCTL_CHILDREN(rack_counters), 1049 OID_AUTO, "timer_hole", CTLFLAG_RD, 1050 &rack_per_timer_hole, 1051 "Total persists start in timer hole"); 1052 1053 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1054 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1055 OID_AUTO, "outsize", CTLFLAG_RD, 1056 
rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1057 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1058 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1059 OID_AUTO, "opts", CTLFLAG_RD, 1060 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1061 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1062 SYSCTL_CHILDREN(rack_sysctl_root), 1063 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1064 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1065 } 1066 1067 static __inline int 1068 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1069 { 1070 if (SEQ_GEQ(b->r_start, a->r_start) && 1071 SEQ_LT(b->r_start, a->r_end)) { 1072 /* 1073 * The entry b is within the 1074 * block a. i.e.: 1075 * a -- |-------------| 1076 * b -- |----| 1077 * <or> 1078 * b -- |------| 1079 * <or> 1080 * b -- |-----------| 1081 */ 1082 return (0); 1083 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1084 /* 1085 * b falls as either the next 1086 * sequence block after a so a 1087 * is said to be smaller than b. 1088 * i.e: 1089 * a -- |------| 1090 * b -- |--------| 1091 * or 1092 * b -- |-----| 1093 */ 1094 return (1); 1095 } 1096 /* 1097 * Whats left is where a is 1098 * larger than b. i.e: 1099 * a -- |-------| 1100 * b -- |---| 1101 * or even possibly 1102 * b -- |--------------| 1103 */ 1104 return (-1); 1105 } 1106 1107 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1108 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1109 1110 static inline int32_t 1111 rack_progress_timeout_check(struct tcpcb *tp) 1112 { 1113 if (tp->t_maxunacktime && tp->t_acktime && TSTMP_GT(ticks, tp->t_acktime)) { 1114 if ((ticks - tp->t_acktime) >= tp->t_maxunacktime) { 1115 /* 1116 * There is an assumption that the caller 1117 * will drop the connection so we will 1118 * increment the counters here. 
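
/*
 * Illustrative sketch: because rb_map_cmp() orders entries by sequence
 * range and returns 0 whenever the probe's r_start falls inside an
 * existing block, the sendmap entry covering an arbitrary sequence
 * number can be located with RB_FIND() and a stack-local probe.  The
 * helper name below is hypothetical; the real lookups of this form
 * appear later in this file.
 *
 *	static struct rack_sendmap *
 *	example_find_covering(struct rack_rb_tree_head *head, uint32_t seq)
 *	{
 *		struct rack_sendmap probe;
 *
 *		memset(&probe, 0, sizeof(probe));
 *		probe.r_start = seq;
 *		probe.r_end = seq + 1;	// r_end is not examined for the probe
 *		return (RB_FIND(rack_rb_tree_head, head, &probe));
 *	}
 */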
1119 */ 1120 struct tcp_rack *rack; 1121 rack = (struct tcp_rack *)tp->t_fb_ptr; 1122 counter_u64_add(rack_progress_drops, 1); 1123 #ifdef NETFLIX_STATS 1124 TCPSTAT_INC(tcps_progdrops); 1125 #endif 1126 rack_log_progress_event(rack, tp, ticks, PROGRESS_DROP, __LINE__); 1127 return (1); 1128 } 1129 } 1130 return (0); 1131 } 1132 1133 1134 1135 static void 1136 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 1137 { 1138 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1139 union tcp_log_stackspecific log; 1140 struct timeval tv; 1141 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1142 log.u_bbr.flex1 = tsused; 1143 log.u_bbr.flex2 = thresh; 1144 log.u_bbr.flex3 = rsm->r_flags; 1145 log.u_bbr.flex4 = rsm->r_dupack; 1146 log.u_bbr.flex5 = rsm->r_start; 1147 log.u_bbr.flex6 = rsm->r_end; 1148 log.u_bbr.flex8 = mod; 1149 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1150 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1151 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1152 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1153 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1154 &rack->rc_inp->inp_socket->so_rcv, 1155 &rack->rc_inp->inp_socket->so_snd, 1156 BBR_LOG_SETTINGS_CHG, 0, 1157 0, &log, false, &tv); 1158 } 1159 } 1160 1161 1162 1163 static void 1164 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 1165 { 1166 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1167 union tcp_log_stackspecific log; 1168 struct timeval tv; 1169 1170 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1171 log.u_bbr.flex1 = TICKS_2_MSEC(rack->rc_tp->t_srtt >> TCP_RTT_SHIFT); 1172 log.u_bbr.flex2 = to; 1173 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 1174 log.u_bbr.flex4 = slot; 1175 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 1176 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 1177 log.u_bbr.flex7 = rack->rc_in_persist; 1178 log.u_bbr.flex8 = which; 1179 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 1180 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1181 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1182 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1183 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1184 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1185 &rack->rc_inp->inp_socket->so_rcv, 1186 &rack->rc_inp->inp_socket->so_snd, 1187 BBR_LOG_TIMERSTAR, 0, 1188 0, &log, false, &tv); 1189 } 1190 } 1191 1192 static void 1193 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, int no) 1194 { 1195 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1196 union tcp_log_stackspecific log; 1197 struct timeval tv; 1198 1199 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1200 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1201 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1202 log.u_bbr.flex8 = to_num; 1203 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 1204 log.u_bbr.flex2 = rack->rc_rack_rtt; 1205 log.u_bbr.flex3 = no; 1206 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1207 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1208 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1209 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1210 &rack->rc_inp->inp_socket->so_rcv, 1211 &rack->rc_inp->inp_socket->so_snd, 1212 BBR_LOG_RTO, 0, 1213 0, &log, false, &tv); 1214 } 1215 } 1216 1217 static void 1218 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t, 1219 uint32_t o_srtt, uint32_t o_var) 1220 { 1221 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 1222 union tcp_log_stackspecific log; 
1223 struct timeval tv; 1224 1225 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1226 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1227 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1228 log.u_bbr.flex1 = t; 1229 log.u_bbr.flex2 = o_srtt; 1230 log.u_bbr.flex3 = o_var; 1231 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 1232 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 1233 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt; 1234 log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot; 1235 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 1236 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 1237 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1238 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1239 TCP_LOG_EVENTP(tp, NULL, 1240 &rack->rc_inp->inp_socket->so_rcv, 1241 &rack->rc_inp->inp_socket->so_snd, 1242 BBR_LOG_BBRRTT, 0, 1243 0, &log, false, &tv); 1244 } 1245 } 1246 1247 static void 1248 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 1249 { 1250 /* 1251 * Log the rtt sample we are 1252 * applying to the srtt algorithm in 1253 * useconds. 1254 */ 1255 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1256 union tcp_log_stackspecific log; 1257 struct timeval tv; 1258 1259 /* Convert our ms to a microsecond */ 1260 memset(&log, 0, sizeof(log)); 1261 log.u_bbr.flex1 = rtt * 1000; 1262 log.u_bbr.flex2 = rack->r_ctl.ack_count; 1263 log.u_bbr.flex3 = rack->r_ctl.sack_count; 1264 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 1265 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 1266 log.u_bbr.flex8 = rack->sack_attack_disable; 1267 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1268 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1269 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1270 &rack->rc_inp->inp_socket->so_rcv, 1271 &rack->rc_inp->inp_socket->so_snd, 1272 TCP_LOG_RTT, 0, 1273 0, &log, false, &tv); 1274 } 1275 } 1276 1277 1278 static inline void 1279 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 1280 { 1281 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 1282 union tcp_log_stackspecific log; 1283 struct timeval tv; 1284 1285 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1286 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1287 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1288 log.u_bbr.flex1 = line; 1289 log.u_bbr.flex2 = tick; 1290 log.u_bbr.flex3 = tp->t_maxunacktime; 1291 log.u_bbr.flex4 = tp->t_acktime; 1292 log.u_bbr.flex8 = event; 1293 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1294 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1295 TCP_LOG_EVENTP(tp, NULL, 1296 &rack->rc_inp->inp_socket->so_rcv, 1297 &rack->rc_inp->inp_socket->so_snd, 1298 BBR_LOG_PROGRESS, 0, 1299 0, &log, false, &tv); 1300 } 1301 } 1302 1303 static void 1304 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts) 1305 { 1306 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1307 union tcp_log_stackspecific log; 1308 struct timeval tv; 1309 1310 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1311 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1312 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1313 log.u_bbr.flex1 = slot; 1314 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 1315 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 1316 log.u_bbr.flex8 = rack->rc_in_persist; 1317 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1318 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1319 TCP_LOG_EVENTP(rack->rc_tp, 
NULL, 1320 &rack->rc_inp->inp_socket->so_rcv, 1321 &rack->rc_inp->inp_socket->so_snd, 1322 BBR_LOG_BBRSND, 0, 1323 0, &log, false, &tv); 1324 } 1325 } 1326 1327 static void 1328 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out) 1329 { 1330 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1331 union tcp_log_stackspecific log; 1332 struct timeval tv; 1333 1334 memset(&log, 0, sizeof(log)); 1335 log.u_bbr.flex1 = did_out; 1336 log.u_bbr.flex2 = nxt_pkt; 1337 log.u_bbr.flex3 = way_out; 1338 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 1339 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1340 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 1341 log.u_bbr.flex7 = rack->r_wanted_output; 1342 log.u_bbr.flex8 = rack->rc_in_persist; 1343 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1344 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1345 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1346 &rack->rc_inp->inp_socket->so_rcv, 1347 &rack->rc_inp->inp_socket->so_snd, 1348 BBR_LOG_DOSEG_DONE, 0, 1349 0, &log, false, &tv); 1350 } 1351 } 1352 1353 static void 1354 rack_log_type_hrdwtso(struct tcpcb *tp, struct tcp_rack *rack, int len, int mod, int32_t orig_len, int frm) 1355 { 1356 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 1357 union tcp_log_stackspecific log; 1358 struct timeval tv; 1359 uint32_t cts; 1360 1361 memset(&log, 0, sizeof(log)); 1362 cts = tcp_get_usecs(&tv); 1363 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 1364 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 1365 log.u_bbr.flex4 = len; 1366 log.u_bbr.flex5 = orig_len; 1367 log.u_bbr.flex6 = rack->r_ctl.rc_sacked; 1368 log.u_bbr.flex7 = mod; 1369 log.u_bbr.flex8 = frm; 1370 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1371 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1372 TCP_LOG_EVENTP(tp, NULL, 1373 &tp->t_inpcb->inp_socket->so_rcv, 1374 &tp->t_inpcb->inp_socket->so_snd, 1375 TCP_HDWR_TLS, 0, 1376 0, &log, false, &tv); 1377 } 1378 } 1379 1380 static void 1381 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling) 1382 { 1383 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1384 union tcp_log_stackspecific log; 1385 struct timeval tv; 1386 1387 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1388 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1389 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1390 log.u_bbr.flex1 = slot; 1391 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 1392 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1393 log.u_bbr.flex7 = hpts_calling; 1394 log.u_bbr.flex8 = rack->rc_in_persist; 1395 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1396 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1397 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1398 &rack->rc_inp->inp_socket->so_rcv, 1399 &rack->rc_inp->inp_socket->so_snd, 1400 BBR_LOG_JUSTRET, 0, 1401 tlen, &log, false, &tv); 1402 } 1403 } 1404 1405 static void 1406 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line) 1407 { 1408 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1409 union tcp_log_stackspecific log; 1410 struct timeval tv; 1411 1412 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1413 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 1414 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 1415 log.u_bbr.flex1 = line; 1416 log.u_bbr.flex2 = 0; 1417 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 1418 log.u_bbr.flex4 = 0; 1419 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 1420 log.u_bbr.flex6 = 
rack->rc_tp->t_rxtcur; 1421 log.u_bbr.flex8 = hpts_removed; 1422 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1423 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1424 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1425 &rack->rc_inp->inp_socket->so_rcv, 1426 &rack->rc_inp->inp_socket->so_snd, 1427 BBR_LOG_TIMERCANC, 0, 1428 0, &log, false, &tv); 1429 } 1430 } 1431 1432 static void 1433 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 1434 { 1435 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1436 union tcp_log_stackspecific log; 1437 struct timeval tv; 1438 1439 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1440 log.u_bbr.flex1 = timers; 1441 log.u_bbr.flex2 = ret; 1442 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 1443 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 1444 log.u_bbr.flex5 = cts; 1445 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 1446 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1447 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1448 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1449 &rack->rc_inp->inp_socket->so_rcv, 1450 &rack->rc_inp->inp_socket->so_snd, 1451 BBR_LOG_TO_PROCESS, 0, 1452 0, &log, false, &tv); 1453 } 1454 } 1455 1456 static void 1457 rack_log_to_prr(struct tcp_rack *rack, int frm) 1458 { 1459 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1460 union tcp_log_stackspecific log; 1461 struct timeval tv; 1462 1463 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1464 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 1465 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 1466 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 1467 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 1468 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 1469 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 1470 log.u_bbr.flex8 = frm; 1471 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1472 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1473 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1474 &rack->rc_inp->inp_socket->so_rcv, 1475 &rack->rc_inp->inp_socket->so_snd, 1476 BBR_LOG_BBRUPD, 0, 1477 0, &log, false, &tv); 1478 } 1479 } 1480 1481 #ifdef NETFLIX_EXP_DETECTION 1482 static void 1483 rack_log_sad(struct tcp_rack *rack, int event) 1484 { 1485 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1486 union tcp_log_stackspecific log; 1487 struct timeval tv; 1488 1489 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 1490 log.u_bbr.flex1 = rack->r_ctl.sack_count; 1491 log.u_bbr.flex2 = rack->r_ctl.ack_count; 1492 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 1493 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 1494 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 1495 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 1496 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 1497 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 1498 log.u_bbr.lt_epoch |= rack->do_detection; 1499 log.u_bbr.applimited = tcp_map_minimum; 1500 log.u_bbr.flex7 = rack->sack_attack_disable; 1501 log.u_bbr.flex8 = event; 1502 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1503 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 1504 log.u_bbr.delivered = tcp_sad_decay_val; 1505 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1506 &rack->rc_inp->inp_socket->so_rcv, 1507 &rack->rc_inp->inp_socket->so_snd, 1508 TCP_SAD_DETECTION, 0, 1509 0, &log, false, &tv); 1510 } 1511 } 1512 #endif 1513 1514 static void 1515 rack_counter_destroy(void) 1516 { 1517 counter_u64_free(rack_badfr); 1518 counter_u64_free(rack_badfr_bytes); 1519 counter_u64_free(rack_rtm_prr_retran); 1520 
counter_u64_free(rack_rtm_prr_newdata); 1521 counter_u64_free(rack_timestamp_mismatch); 1522 counter_u64_free(rack_reorder_seen); 1523 counter_u64_free(rack_tlp_tot); 1524 counter_u64_free(rack_tlp_newdata); 1525 counter_u64_free(rack_tlp_retran); 1526 counter_u64_free(rack_tlp_retran_bytes); 1527 counter_u64_free(rack_tlp_retran_fail); 1528 counter_u64_free(rack_to_tot); 1529 counter_u64_free(rack_to_arm_rack); 1530 counter_u64_free(rack_to_arm_tlp); 1531 counter_u64_free(rack_paced_segments); 1532 counter_u64_free(rack_unpaced_segments); 1533 counter_u64_free(rack_saw_enobuf); 1534 counter_u64_free(rack_saw_enetunreach); 1535 counter_u64_free(rack_to_alloc_hard); 1536 counter_u64_free(rack_to_alloc_emerg); 1537 counter_u64_free(rack_sack_proc_all); 1538 counter_u64_free(rack_sack_proc_short); 1539 counter_u64_free(rack_sack_proc_restart); 1540 counter_u64_free(rack_to_alloc); 1541 counter_u64_free(rack_to_alloc_limited); 1542 counter_u64_free(rack_alloc_limited_conns); 1543 counter_u64_free(rack_split_limited); 1544 counter_u64_free(rack_find_high); 1545 counter_u64_free(rack_enter_tlp_calc); 1546 counter_u64_free(rack_used_tlpmethod); 1547 counter_u64_free(rack_used_tlpmethod2); 1548 counter_u64_free(rack_progress_drops); 1549 counter_u64_free(rack_input_idle_reduces); 1550 counter_u64_free(rack_collapsed_win); 1551 counter_u64_free(rack_tlp_does_nada); 1552 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 1553 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 1554 } 1555 1556 static struct rack_sendmap * 1557 rack_alloc(struct tcp_rack *rack) 1558 { 1559 struct rack_sendmap *rsm; 1560 1561 rsm = uma_zalloc(rack_zone, M_NOWAIT); 1562 if (rsm) { 1563 rack->r_ctl.rc_num_maps_alloced++; 1564 counter_u64_add(rack_to_alloc, 1); 1565 return (rsm); 1566 } 1567 if (rack->rc_free_cnt) { 1568 counter_u64_add(rack_to_alloc_emerg, 1); 1569 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 1570 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 1571 rack->rc_free_cnt--; 1572 return (rsm); 1573 } 1574 return (NULL); 1575 } 1576 1577 static struct rack_sendmap * 1578 rack_alloc_full_limit(struct tcp_rack *rack) 1579 { 1580 if ((rack_tcp_map_entries_limit > 0) && 1581 (rack->do_detection == 0) && 1582 (rack->r_ctl.rc_num_maps_alloced >= rack_tcp_map_entries_limit)) { 1583 counter_u64_add(rack_to_alloc_limited, 1); 1584 if (!rack->alloc_limit_reported) { 1585 rack->alloc_limit_reported = 1; 1586 counter_u64_add(rack_alloc_limited_conns, 1); 1587 } 1588 return (NULL); 1589 } 1590 return (rack_alloc(rack)); 1591 } 1592 1593 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 1594 static struct rack_sendmap * 1595 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 1596 { 1597 struct rack_sendmap *rsm; 1598 1599 if (limit_type) { 1600 /* currently there is only one limit type */ 1601 if (rack_tcp_map_split_limit > 0 && 1602 (rack->do_detection == 0) && 1603 rack->r_ctl.rc_num_split_allocs >= rack_tcp_map_split_limit) { 1604 counter_u64_add(rack_split_limited, 1); 1605 if (!rack->alloc_limit_reported) { 1606 rack->alloc_limit_reported = 1; 1607 counter_u64_add(rack_alloc_limited_conns, 1); 1608 } 1609 return (NULL); 1610 } 1611 } 1612 1613 /* allocate and mark in the limit type, if set */ 1614 rsm = rack_alloc(rack); 1615 if (rsm != NULL && limit_type) { 1616 rsm->r_limit_type = limit_type; 1617 rack->r_ctl.rc_num_split_allocs++; 1618 } 1619 return (rsm); 1620 } 1621 1622 static void 1623 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 1624 { 1625 if (rsm->r_limit_type) { 
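/*
 * Added note (not in the original sources): entries handed out by
 * rack_alloc_limit() with a non-zero limit_type carry that mark in
 * r_limit_type, which is what lets this branch undo the split
 * accounting. Hypothetical usage sketch (RACK_LIMIT_TYPE_SPLIT is
 * assumed to be the single limit type the comment below refers to):
 *
 *	rsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
 *	... use the split entry ...
 *	rack_free(rack, rsm);   rc_num_split_allocs drops back down
 */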
1626 /* currently there is only one limit type */ 1627 rack->r_ctl.rc_num_split_allocs--; 1628 } 1629 if (rack->r_ctl.rc_tlpsend == rsm) 1630 rack->r_ctl.rc_tlpsend = NULL; 1631 if (rack->r_ctl.rc_sacklast == rsm) 1632 rack->r_ctl.rc_sacklast = NULL; 1633 if (rack->rc_free_cnt < rack_free_cache) { 1634 memset(rsm, 0, sizeof(struct rack_sendmap)); 1635 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 1636 rsm->r_limit_type = 0; 1637 rack->rc_free_cnt++; 1638 return; 1639 } 1640 rack->r_ctl.rc_num_maps_alloced--; 1641 uma_zfree(rack_zone, rsm); 1642 } 1643 1644 /* 1645 * CC wrapper hook functions 1646 */ 1647 static void 1648 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, struct tcphdr *th, uint16_t nsegs, 1649 uint16_t type, int32_t recovery) 1650 { 1651 #ifdef NETFLIX_STATS 1652 int32_t gput; 1653 #endif 1654 1655 INP_WLOCK_ASSERT(tp->t_inpcb); 1656 tp->ccv->nsegs = nsegs; 1657 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); 1658 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 1659 uint32_t max; 1660 1661 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 1662 if (tp->ccv->bytes_this_ack > max) { 1663 tp->ccv->bytes_this_ack = max; 1664 } 1665 } 1666 if (tp->snd_cwnd <= tp->snd_wnd) 1667 tp->ccv->flags |= CCF_CWND_LIMITED; 1668 else 1669 tp->ccv->flags &= ~CCF_CWND_LIMITED; 1670 1671 if (type == CC_ACK) { 1672 #ifdef NETFLIX_STATS 1673 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 1674 ((int32_t) tp->snd_cwnd) - tp->snd_wnd); 1675 if ((tp->t_flags & TF_GPUTINPROG) && 1676 SEQ_GEQ(th->th_ack, tp->gput_ack)) { 1677 gput = (((int64_t) (th->th_ack - tp->gput_seq)) << 3) / 1678 max(1, tcp_ts_getticks() - tp->gput_ts); 1679 /* We store it in bytes per ms (or kbytes per sec) */ 1680 rack->r_ctl.rc_gp_history[rack->r_ctl.rc_gp_hist_idx] = gput / 8; 1681 rack->r_ctl.rc_gp_hist_idx++; 1682 if (rack->r_ctl.rc_gp_hist_idx >= RACK_GP_HIST) 1683 rack->r_ctl.rc_gp_hist_filled = 1; 1684 rack->r_ctl.rc_gp_hist_idx %= RACK_GP_HIST; 1685 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 1686 gput); 1687 /* 1688 * XXXLAS: This is a temporary hack, and should be 1689 * chained off VOI_TCP_GPUT when stats(9) grows an 1690 * API to deal with chained VOIs. 1691 */ 1692 if (tp->t_stats_gput_prev > 0) 1693 stats_voi_update_abs_s32(tp->t_stats, 1694 VOI_TCP_GPUT_ND, 1695 ((gput - tp->t_stats_gput_prev) * 100) / 1696 tp->t_stats_gput_prev); 1697 tp->t_flags &= ~TF_GPUTINPROG; 1698 tp->t_stats_gput_prev = gput; 1699 1700 if (tp->t_maxpeakrate) { 1701 /* 1702 * We update t_peakrate_thr. This gives us roughly 1703 * one update per round trip time. 1704 */ 1705 tcp_update_peakrate_thr(tp); 1706 } 1707 } 1708 #endif 1709 if (tp->snd_cwnd > tp->snd_ssthresh) { 1710 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 1711 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 1712 if (tp->t_bytes_acked >= tp->snd_cwnd) { 1713 tp->t_bytes_acked -= tp->snd_cwnd; 1714 tp->ccv->flags |= CCF_ABC_SENTAWND; 1715 } 1716 } else { 1717 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 1718 tp->t_bytes_acked = 0; 1719 } 1720 } 1721 if (CC_ALGO(tp)->ack_received != NULL) { 1722 /* XXXLAS: Find a way to live without this */ 1723 tp->ccv->curack = th->th_ack; 1724 CC_ALGO(tp)->ack_received(tp->ccv, type); 1725 } 1726 #ifdef NETFLIX_STATS 1727 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, tp->snd_cwnd); 1728 #endif 1729 if (rack->r_ctl.rc_rack_largest_cwnd < tp->snd_cwnd) { 1730 rack->r_ctl.rc_rack_largest_cwnd = tp->snd_cwnd; 1731 } 1732 /* we enforce max peak rate if it is set. 
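 * Added note (not part of the original comment): t_peakrate_thr is
 * only non-zero when a maximum peak rate has been configured; it is
 * refreshed roughly once per round trip by tcp_update_peakrate_thr()
 * above (when stats support is compiled in), and here it simply caps
 * snd_cwnd so the window never implies a faster rate than that cap.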
*/ 1733 if (tp->t_peakrate_thr && tp->snd_cwnd > tp->t_peakrate_thr) { 1734 tp->snd_cwnd = tp->t_peakrate_thr; 1735 } 1736 } 1737 1738 static void 1739 tcp_rack_partialack(struct tcpcb *tp, struct tcphdr *th) 1740 { 1741 struct tcp_rack *rack; 1742 1743 rack = (struct tcp_rack *)tp->t_fb_ptr; 1744 INP_WLOCK_ASSERT(tp->t_inpcb); 1745 if (rack->r_ctl.rc_prr_sndcnt > 0) 1746 rack->r_wanted_output++; 1747 } 1748 1749 static void 1750 rack_post_recovery(struct tcpcb *tp, struct tcphdr *th) 1751 { 1752 struct tcp_rack *rack; 1753 1754 INP_WLOCK_ASSERT(tp->t_inpcb); 1755 rack = (struct tcp_rack *)tp->t_fb_ptr; 1756 if (CC_ALGO(tp)->post_recovery != NULL) { 1757 tp->ccv->curack = th->th_ack; 1758 CC_ALGO(tp)->post_recovery(tp->ccv); 1759 } 1760 /* 1761 * Here we can in theory adjust cwnd to be based on the number of 1762 * losses in the window (rack->r_ctl.rc_loss_count). This is done 1763 * based on the rack_use_proportional flag. 1764 */ 1765 if (rack->r_ctl.rc_prop_reduce && rack->r_ctl.rc_prop_rate) { 1766 int32_t reduce; 1767 1768 reduce = (rack->r_ctl.rc_loss_count * rack->r_ctl.rc_prop_rate); 1769 if (reduce > 50) { 1770 reduce = 50; 1771 } 1772 tp->snd_cwnd -= ((reduce * tp->snd_cwnd) / 100); 1773 } else { 1774 if (tp->snd_cwnd > tp->snd_ssthresh) { 1775 /* Drop us down to the ssthresh (1/2 cwnd at loss) */ 1776 tp->snd_cwnd = tp->snd_ssthresh; 1777 } 1778 } 1779 if (rack->r_ctl.rc_prr_sndcnt > 0) { 1780 /* Suck the next prr cnt back into cwnd */ 1781 tp->snd_cwnd += rack->r_ctl.rc_prr_sndcnt; 1782 rack->r_ctl.rc_prr_sndcnt = 0; 1783 rack_log_to_prr(rack, 1); 1784 } 1785 tp->snd_recover = tp->snd_una; 1786 EXIT_RECOVERY(tp->t_flags); 1787 1788 1789 } 1790 1791 static void 1792 rack_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) 1793 { 1794 struct tcp_rack *rack; 1795 1796 INP_WLOCK_ASSERT(tp->t_inpcb); 1797 1798 rack = (struct tcp_rack *)tp->t_fb_ptr; 1799 switch (type) { 1800 case CC_NDUPACK: 1801 tp->t_flags &= ~TF_WASFRECOVERY; 1802 tp->t_flags &= ~TF_WASCRECOVERY; 1803 if (!IN_FASTRECOVERY(tp->t_flags)) { 1804 rack->r_ctl.rc_tlp_rtx_out = 0; 1805 rack->r_ctl.rc_prr_delivered = 0; 1806 rack->r_ctl.rc_prr_out = 0; 1807 rack->r_ctl.rc_loss_count = 0; 1808 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 1809 rack_log_to_prr(rack, 2); 1810 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 1811 tp->snd_recover = tp->snd_max; 1812 if (tp->t_flags & TF_ECN_PERMIT) 1813 tp->t_flags |= TF_ECN_SND_CWR; 1814 } 1815 break; 1816 case CC_ECN: 1817 if (!IN_CONGRECOVERY(tp->t_flags)) { 1818 TCPSTAT_INC(tcps_ecn_rcwnd); 1819 tp->snd_recover = tp->snd_max; 1820 if (tp->t_flags & TF_ECN_PERMIT) 1821 tp->t_flags |= TF_ECN_SND_CWR; 1822 } 1823 break; 1824 case CC_RTO: 1825 tp->t_dupacks = 0; 1826 tp->t_bytes_acked = 0; 1827 EXIT_RECOVERY(tp->t_flags); 1828 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 / 1829 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 1830 tp->snd_cwnd = ctf_fixed_maxseg(tp); 1831 break; 1832 case CC_RTO_ERR: 1833 TCPSTAT_INC(tcps_sndrexmitbad); 1834 /* RTO was unnecessary, so reset everything. 
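 * Added note (not part of the original comment): the values restored
 * below (snd_cwnd_prev, snd_ssthresh_prev, snd_recover_prev and the
 * TF_WASFRECOVERY/TF_WASCRECOVERY flags) are the snapshot taken in
 * rack_timeout_rxt() on the first retransmit (t_rxtshift == 1), so a
 * retransmit later judged spurious puts the connection back where it
 * was before it backed off.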
*/ 1835 tp->snd_cwnd = tp->snd_cwnd_prev; 1836 tp->snd_ssthresh = tp->snd_ssthresh_prev; 1837 tp->snd_recover = tp->snd_recover_prev; 1838 if (tp->t_flags & TF_WASFRECOVERY) { 1839 ENTER_FASTRECOVERY(tp->t_flags); 1840 tp->t_flags &= ~TF_WASFRECOVERY; 1841 } 1842 if (tp->t_flags & TF_WASCRECOVERY) { 1843 ENTER_CONGRECOVERY(tp->t_flags); 1844 tp->t_flags &= ~TF_WASCRECOVERY; 1845 } 1846 tp->snd_nxt = tp->snd_max; 1847 tp->t_badrxtwin = 0; 1848 break; 1849 } 1850 1851 if (CC_ALGO(tp)->cong_signal != NULL) { 1852 if (th != NULL) 1853 tp->ccv->curack = th->th_ack; 1854 CC_ALGO(tp)->cong_signal(tp->ccv, type); 1855 } 1856 } 1857 1858 1859 1860 static inline void 1861 rack_cc_after_idle(struct tcpcb *tp) 1862 { 1863 uint32_t i_cwnd; 1864 1865 INP_WLOCK_ASSERT(tp->t_inpcb); 1866 1867 #ifdef NETFLIX_STATS 1868 TCPSTAT_INC(tcps_idle_restarts); 1869 if (tp->t_state == TCPS_ESTABLISHED) 1870 TCPSTAT_INC(tcps_idle_estrestarts); 1871 #endif 1872 if (CC_ALGO(tp)->after_idle != NULL) 1873 CC_ALGO(tp)->after_idle(tp->ccv); 1874 1875 if (tp->snd_cwnd == 1) 1876 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 1877 else 1878 i_cwnd = tcp_compute_initwnd(tcp_maxseg(tp)); 1879 1880 /* 1881 * Being idle is no differnt than the initial window. If the cc 1882 * clamps it down below the initial window raise it to the initial 1883 * window. 1884 */ 1885 if (tp->snd_cwnd < i_cwnd) { 1886 tp->snd_cwnd = i_cwnd; 1887 } 1888 } 1889 1890 1891 /* 1892 * Indicate whether this ack should be delayed. We can delay the ack if 1893 * following conditions are met: 1894 * - There is no delayed ack timer in progress. 1895 * - Our last ack wasn't a 0-sized window. We never want to delay 1896 * the ack that opens up a 0-sized window. 1897 * - LRO wasn't used for this segment. We make sure by checking that the 1898 * segment size is not larger than the MSS. 1899 * - Delayed acks are enabled or this is a half-synchronized T/TCP 1900 * connection. 1901 */ 1902 #define DELAY_ACK(tp, tlen) \ 1903 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 1904 ((tp->t_flags & TF_DELACK) == 0) && \ 1905 (tlen <= tp->t_maxseg) && \ 1906 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 1907 1908 static struct rack_sendmap * 1909 rack_find_lowest_rsm(struct tcp_rack *rack) 1910 { 1911 struct rack_sendmap *rsm; 1912 1913 /* 1914 * Walk the time-order transmitted list looking for an rsm that is 1915 * not acked. This will be the one that was sent the longest time 1916 * ago that is still outstanding. 1917 */ 1918 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 1919 if (rsm->r_flags & RACK_ACKED) { 1920 continue; 1921 } 1922 goto finish; 1923 } 1924 finish: 1925 return (rsm); 1926 } 1927 1928 static struct rack_sendmap * 1929 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 1930 { 1931 struct rack_sendmap *prsm; 1932 1933 /* 1934 * Walk the sequence order list backward until we hit and arrive at 1935 * the highest seq not acked. In theory when this is called it 1936 * should be the last segment (which it was not). 1937 */ 1938 counter_u64_add(rack_find_high, 1); 1939 prsm = rsm; 1940 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 1941 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 1942 continue; 1943 } 1944 return (prsm); 1945 } 1946 return (NULL); 1947 } 1948 1949 1950 static uint32_t 1951 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 1952 { 1953 int32_t lro; 1954 uint32_t thresh; 1955 1956 /* 1957 * lro is the flag we use to determine if we have seen reordering. 
1958 * If it gets set we have seen reordering. The reorder logic either 1959 * works in one of two ways: 1960 * 1961 * If reorder-fade is configured, then we track the last time we saw 1962 * re-ordering occur. If we reach the point where enough time has 1963 * passed we no longer consider reordering as occurring. 1964 * 1965 * Or if reorder-fade is 0, then once we see reordering we consider 1966 * the connection to always be subject to reordering and just set lro 1967 * to 1. 1968 * 1969 * In the end if lro is non-zero we add the extra time for 1970 * reordering in. 1971 */ 1972 if (srtt == 0) 1973 srtt = 1; 1974 if (rack->r_ctl.rc_reorder_ts) { 1975 if (rack->r_ctl.rc_reorder_fade) { 1976 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 1977 lro = cts - rack->r_ctl.rc_reorder_ts; 1978 if (lro == 0) { 1979 /* 1980 * No time has passed since the last 1981 * reorder, mark it as reordering. 1982 */ 1983 lro = 1; 1984 } 1985 } else { 1986 /* Negative time? */ 1987 lro = 0; 1988 } 1989 if (lro > rack->r_ctl.rc_reorder_fade) { 1990 /* Turn off reordering seen too */ 1991 rack->r_ctl.rc_reorder_ts = 0; 1992 lro = 0; 1993 } 1994 } else { 1995 /* Reordering does not fade */ 1996 lro = 1; 1997 } 1998 } else { 1999 lro = 0; 2000 } 2001 thresh = srtt + rack->r_ctl.rc_pkt_delay; 2002 if (lro) { 2003 /* It must be set, if not you get 1/4 rtt */ 2004 if (rack->r_ctl.rc_reorder_shift) 2005 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 2006 else 2007 thresh += (srtt >> 2); 2008 } else { 2009 thresh += 1; 2010 } 2011 /* We don't let the rack timeout be above an RTO */ 2012 if (thresh > TICKS_2_MSEC(rack->rc_tp->t_rxtcur)) { 2013 thresh = TICKS_2_MSEC(rack->rc_tp->t_rxtcur); 2014 } 2015 /* And we don't want it above the RTO max either */ 2016 if (thresh > rack_rto_max) { 2017 thresh = rack_rto_max; 2018 } 2019 return (thresh); 2020 } 2021 2022 static uint32_t 2023 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 2024 struct rack_sendmap *rsm, uint32_t srtt) 2025 { 2026 struct rack_sendmap *prsm; 2027 uint32_t thresh, len; 2028 int maxseg; 2029 2030 if (srtt == 0) 2031 srtt = 1; 2032 if (rack->r_ctl.rc_tlp_threshold) 2033 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 2034 else 2035 thresh = (srtt * 2); 2036 2037 /* Get the previous sent packet, if any */ 2038 maxseg = ctf_fixed_maxseg(tp); 2039 counter_u64_add(rack_enter_tlp_calc, 1); 2040 len = rsm->r_end - rsm->r_start; 2041 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 2042 /* Exactly like the ID */ 2043 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= maxseg) { 2044 uint32_t alt_thresh; 2045 /* 2046 * Compensate for delayed-ack with the d-ack time. 2047 */ 2048 counter_u64_add(rack_used_tlpmethod, 1); 2049 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2050 if (alt_thresh > thresh) 2051 thresh = alt_thresh; 2052 } 2053 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 2054 /* 2.1 behavior */ 2055 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 2056 if (prsm && (len <= maxseg)) { 2057 /* 2058 * Two packets outstanding, thresh should be (2*srtt) + 2059 * possible inter-packet delay (if any).
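 * Worked example (added; illustrative numbers, assuming
 * rc_tlp_threshold is unset so the base is 2 * srtt): with an srtt of
 * 40 ms and the previous packet sent 10 ms before this one, thresh
 * becomes (2 * 40) + 10 = 90 ms, before the RTO, rack_rto_max and
 * rack_tlp_min clamps applied at the end of this function.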
2060 */ 2061 uint32_t inter_gap = 0; 2062 int idx, nidx; 2063 2064 counter_u64_add(rack_used_tlpmethod, 1); 2065 idx = rsm->r_rtr_cnt - 1; 2066 nidx = prsm->r_rtr_cnt - 1; 2067 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) { 2068 /* Yes it was sent later (or at the same time) */ 2069 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 2070 } 2071 thresh += inter_gap; 2072 } else if (len <= maxseg) { 2073 /* 2074 * Possibly compensate for delayed-ack. 2075 */ 2076 uint32_t alt_thresh; 2077 2078 counter_u64_add(rack_used_tlpmethod2, 1); 2079 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2080 if (alt_thresh > thresh) 2081 thresh = alt_thresh; 2082 } 2083 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 2084 /* 2.2 behavior */ 2085 if (len <= maxseg) { 2086 uint32_t alt_thresh; 2087 /* 2088 * Compensate for delayed-ack with the d-ack time. 2089 */ 2090 counter_u64_add(rack_used_tlpmethod, 1); 2091 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 2092 if (alt_thresh > thresh) 2093 thresh = alt_thresh; 2094 } 2095 } 2096 /* Not above an RTO */ 2097 if (thresh > TICKS_2_MSEC(tp->t_rxtcur)) { 2098 thresh = TICKS_2_MSEC(tp->t_rxtcur); 2099 } 2100 /* Not above a RTO max */ 2101 if (thresh > rack_rto_max) { 2102 thresh = rack_rto_max; 2103 } 2104 /* Apply user supplied min TLP */ 2105 if (thresh < rack_tlp_min) { 2106 thresh = rack_tlp_min; 2107 } 2108 return (thresh); 2109 } 2110 2111 static uint32_t 2112 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 2113 { 2114 /* 2115 * We want the rack_rtt which is the 2116 * last rtt we measured. However if that 2117 * does not exist we fallback to the srtt (which 2118 * we probably will never do) and then as a last 2119 * resort we use RACK_INITIAL_RTO if no srtt is 2120 * yet set. 2121 */ 2122 if (rack->rc_rack_rtt) 2123 return(rack->rc_rack_rtt); 2124 else if (tp->t_srtt == 0) 2125 return(RACK_INITIAL_RTO); 2126 return (TICKS_2_MSEC(tp->t_srtt >> TCP_RTT_SHIFT)); 2127 } 2128 2129 static struct rack_sendmap * 2130 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 2131 { 2132 /* 2133 * Check to see that we don't need to fall into recovery. We will 2134 * need to do so if our oldest transmit is past the time we should 2135 * have had an ack. 
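 * Worked example (added; illustrative numbers): with a measured
 * rack_rtt of 40 ms, no reordering seen and a 1 ms rc_pkt_delay,
 * rack_calc_thresh_rack() yields 40 + 1 + 1 = 42 ms, so if the oldest
 * un-acked entry was last sent at least 42 ms before tsused it is
 * declared overdue and CC_NDUPACK is signalled to enter recovery.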
2136 */ 2137 struct tcp_rack *rack; 2138 struct rack_sendmap *rsm; 2139 int32_t idx; 2140 uint32_t srtt, thresh; 2141 2142 rack = (struct tcp_rack *)tp->t_fb_ptr; 2143 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 2144 return (NULL); 2145 } 2146 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2147 if (rsm == NULL) 2148 return (NULL); 2149 2150 if (rsm->r_flags & RACK_ACKED) { 2151 rsm = rack_find_lowest_rsm(rack); 2152 if (rsm == NULL) 2153 return (NULL); 2154 } 2155 idx = rsm->r_rtr_cnt - 1; 2156 srtt = rack_grab_rtt(tp, rack); 2157 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 2158 if (tsused < rsm->r_tim_lastsent[idx]) { 2159 return (NULL); 2160 } 2161 if ((tsused - rsm->r_tim_lastsent[idx]) < thresh) { 2162 return (NULL); 2163 } 2164 /* Ok if we reach here we are over-due */ 2165 rack->r_ctl.rc_rsm_start = rsm->r_start; 2166 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 2167 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 2168 rack_cong_signal(tp, NULL, CC_NDUPACK); 2169 return (rsm); 2170 } 2171 2172 static uint32_t 2173 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 2174 { 2175 int32_t t; 2176 int32_t tt; 2177 uint32_t ret_val; 2178 2179 t = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT) + ((tp->t_rttvar * 4) >> TCP_RTT_SHIFT)); 2180 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 2181 rack_persist_min, rack_persist_max); 2182 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 2183 tp->t_rxtshift++; 2184 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 2185 ret_val = (uint32_t)tt; 2186 return (ret_val); 2187 } 2188 2189 static uint32_t 2190 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 2191 { 2192 /* 2193 * Start the FR timer, we do this based on getting the first one in 2194 * the rc_tmap. Note that if its NULL we must stop the timer. in all 2195 * events we need to stop the running timer (if its running) before 2196 * starting the new one. 2197 */ 2198 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 2199 uint32_t srtt_cur; 2200 int32_t idx; 2201 int32_t is_tlp_timer = 0; 2202 struct rack_sendmap *rsm; 2203 2204 if (rack->t_timers_stopped) { 2205 /* All timers have been stopped none are to run */ 2206 return (0); 2207 } 2208 if (rack->rc_in_persist) { 2209 /* We can't start any timer in persists */ 2210 return (rack_get_persists_timer_val(tp, rack)); 2211 } 2212 if ((tp->t_state < TCPS_ESTABLISHED) || 2213 ((tp->t_flags & TF_SACK_PERMIT) == 0)) 2214 goto activate_rxt; 2215 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2216 if ((rsm == NULL) || sup_rack) { 2217 /* Nothing on the send map */ 2218 activate_rxt: 2219 time_since_sent = 0; 2220 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2221 if (rsm) { 2222 idx = rsm->r_rtr_cnt - 1; 2223 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time)) 2224 tstmp_touse = rsm->r_tim_lastsent[idx]; 2225 else 2226 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time; 2227 if (TSTMP_GT(tstmp_touse, cts)) 2228 time_since_sent = cts - tstmp_touse; 2229 } 2230 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 2231 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 2232 to = TICKS_2_MSEC(tp->t_rxtcur); 2233 if (to > time_since_sent) 2234 to -= time_since_sent; 2235 else 2236 to = rack->r_ctl.rc_min_to; 2237 if (to == 0) 2238 to = 1; 2239 return (to); 2240 } 2241 return (0); 2242 } 2243 if (rsm->r_flags & RACK_ACKED) { 2244 rsm = rack_find_lowest_rsm(rack); 2245 if (rsm == NULL) { 2246 /* No lowest? 
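 * (Added note: a NULL return here means no un-acked entry remains on
 * the time-ordered list, so there is nothing for a RACK or TLP timer
 * to act on and we fall back to arming the retransmit timer.)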
*/ 2247 goto activate_rxt; 2248 } 2249 } 2250 if (rack->sack_attack_disable) { 2251 /* 2252 * We don't want to do 2253 * any TLP's if you are an attacker. 2254 * Though if you are doing what 2255 * is expected you may still have 2256 * SACK-PASSED marks. 2257 */ 2258 goto activate_rxt; 2259 } 2260 /* Convert from ms to usecs */ 2261 if (rsm->r_flags & RACK_SACK_PASSED) { 2262 if ((tp->t_flags & TF_SENTFIN) && 2263 ((tp->snd_max - tp->snd_una) == 1) && 2264 (rsm->r_flags & RACK_HAS_FIN)) { 2265 /* 2266 * We don't start a rack timer if all we have is a 2267 * FIN outstanding. 2268 */ 2269 goto activate_rxt; 2270 } 2271 if ((rack->use_rack_cheat == 0) && 2272 (IN_RECOVERY(tp->t_flags)) && 2273 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 2274 /* 2275 * We are not cheating, in recovery and 2276 * not enough ack's to yet get our next 2277 * retransmission out. 2278 * 2279 * Note that classified attackers do not 2280 * get to use the rack-cheat. 2281 */ 2282 goto activate_tlp; 2283 } 2284 srtt = rack_grab_rtt(tp, rack); 2285 thresh = rack_calc_thresh_rack(rack, srtt, cts); 2286 idx = rsm->r_rtr_cnt - 1; 2287 exp = rsm->r_tim_lastsent[idx] + thresh; 2288 if (SEQ_GEQ(exp, cts)) { 2289 to = exp - cts; 2290 if (to < rack->r_ctl.rc_min_to) { 2291 to = rack->r_ctl.rc_min_to; 2292 } 2293 } else { 2294 to = rack->r_ctl.rc_min_to; 2295 } 2296 } else { 2297 /* Ok we need to do a TLP not RACK */ 2298 activate_tlp: 2299 if ((rack->rc_tlp_in_progress != 0) || 2300 (rack->r_ctl.rc_tlp_rtx_out != 0)) { 2301 /* 2302 * The previous send was a TLP or a tlp_rtx is in 2303 * process. 2304 */ 2305 goto activate_rxt; 2306 } 2307 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 2308 if (rsm == NULL) { 2309 /* We found no rsm to TLP with. */ 2310 goto activate_rxt; 2311 } 2312 if (rsm->r_flags & RACK_HAS_FIN) { 2313 /* If its a FIN we dont do TLP */ 2314 rsm = NULL; 2315 goto activate_rxt; 2316 } 2317 idx = rsm->r_rtr_cnt - 1; 2318 time_since_sent = 0; 2319 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time)) 2320 tstmp_touse = rsm->r_tim_lastsent[idx]; 2321 else 2322 tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time; 2323 if (TSTMP_GT(tstmp_touse, cts)) 2324 time_since_sent = cts - tstmp_touse; 2325 is_tlp_timer = 1; 2326 if (tp->t_srtt) { 2327 srtt_cur = (tp->t_srtt >> TCP_RTT_SHIFT); 2328 srtt = TICKS_2_MSEC(srtt_cur); 2329 } else 2330 srtt = RACK_INITIAL_RTO; 2331 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 2332 if (thresh > time_since_sent) 2333 to = thresh - time_since_sent; 2334 else 2335 to = rack->r_ctl.rc_min_to; 2336 if (to > TCPTV_REXMTMAX) { 2337 /* 2338 * If the TLP time works out to larger than the max 2339 * RTO lets not do TLP.. just RTO. 2340 */ 2341 goto activate_rxt; 2342 } 2343 if (rsm->r_start != rack->r_ctl.rc_last_tlp_seq) { 2344 /* 2345 * The tail is no longer the last one I did a probe 2346 * on 2347 */ 2348 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 2349 rack->r_ctl.rc_last_tlp_seq = rsm->r_start; 2350 } 2351 } 2352 if (is_tlp_timer == 0) { 2353 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 2354 } else { 2355 if ((rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) || 2356 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) { 2357 /* 2358 * We have exceeded how many times we can retran the 2359 * current TLP timer, switch to the RTO timer. 
2360 */ 2361 goto activate_rxt; 2362 } else { 2363 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 2364 } 2365 } 2366 if (to == 0) 2367 to = 1; 2368 return (to); 2369 } 2370 2371 static void 2372 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2373 { 2374 if (rack->rc_in_persist == 0) { 2375 rack->r_ctl.rc_went_idle_time = cts; 2376 rack_timer_cancel(tp, rack, cts, __LINE__); 2377 tp->t_rxtshift = 0; 2378 rack->rc_in_persist = 1; 2379 } 2380 } 2381 2382 static void 2383 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack) 2384 { 2385 if (rack->rc_inp->inp_in_hpts) { 2386 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 2387 rack->r_ctl.rc_hpts_flags = 0; 2388 } 2389 rack->rc_in_persist = 0; 2390 rack->r_ctl.rc_went_idle_time = 0; 2391 tp->t_flags &= ~TF_FORCEDATA; 2392 tp->t_rxtshift = 0; 2393 } 2394 2395 static void 2396 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 2397 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 2398 { 2399 struct inpcb *inp; 2400 uint32_t delayed_ack = 0; 2401 uint32_t hpts_timeout; 2402 uint8_t stopped; 2403 uint32_t left = 0; 2404 2405 inp = tp->t_inpcb; 2406 if (inp->inp_in_hpts) { 2407 /* A previous call is already set up */ 2408 return; 2409 } 2410 if ((tp->t_state == TCPS_CLOSED) || 2411 (tp->t_state == TCPS_LISTEN)) { 2412 return; 2413 } 2414 stopped = rack->rc_tmr_stopped; 2415 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 2416 left = rack->r_ctl.rc_timer_exp - cts; 2417 } 2418 rack->tlp_timer_up = 0; 2419 rack->r_ctl.rc_timer_exp = 0; 2420 if (rack->rc_inp->inp_in_hpts == 0) { 2421 rack->r_ctl.rc_hpts_flags = 0; 2422 } 2423 if (slot) { 2424 /* We are hptsi too */ 2425 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 2426 } else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 2427 /* 2428 * We are still left on the hpts when the to goes 2429 * it will be for output. 2430 */ 2431 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) 2432 slot = rack->r_ctl.rc_last_output_to - cts; 2433 else 2434 slot = 1; 2435 } 2436 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 2437 if (rack->sack_attack_disable && 2438 (slot < USEC_TO_MSEC(tcp_sad_pacing_interval))) { 2439 /* 2440 * We have a potential attacker on 2441 * the line. We have possibly some 2442 * (or now) pacing time set. We want to 2443 * slow down the processing of sacks by some 2444 * amount (if it is an attacker). Set the default 2445 * slot for attackers in place (unless the orginal 2446 * interval is longer). Its stored in 2447 * micro-seconds, so lets convert to msecs. 2448 */ 2449 slot = USEC_TO_MSEC(tcp_sad_pacing_interval); 2450 } 2451 if (tp->t_flags & TF_DELACK) { 2452 delayed_ack = TICKS_2_MSEC(tcp_delacktime); 2453 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 2454 } 2455 if (delayed_ack && ((hpts_timeout == 0) || 2456 (delayed_ack < hpts_timeout))) 2457 hpts_timeout = delayed_ack; 2458 else 2459 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 2460 /* 2461 * If no timers are going to run and we will fall off the hptsi 2462 * wheel, we resort to a keep-alive timer if its configured. 2463 */ 2464 if ((hpts_timeout == 0) && 2465 (slot == 0)) { 2466 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 2467 (tp->t_state <= TCPS_CLOSING)) { 2468 /* 2469 * Ok we have no timer (persists, rack, tlp, rxt or 2470 * del-ack), we don't have segments being paced. So 2471 * all that is left is the keepalive timer. 
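 * Added note (not part of the original comment): for an idle,
 * established connection with SO_KEEPALIVE set this arms the hpts
 * wheel for TP_KEEPIDLE(tp) (net.inet.tcp.keepidle, two hours by
 * default), or TP_KEEPINIT(tp) while the handshake has not yet
 * completed.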
2472 */ 2473 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 2474 /* Get the established keep-alive time */ 2475 hpts_timeout = TP_KEEPIDLE(tp); 2476 } else { 2477 /* Get the initial setup keep-alive time */ 2478 hpts_timeout = TP_KEEPINIT(tp); 2479 } 2480 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 2481 } 2482 } 2483 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 2484 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 2485 /* 2486 * RACK, TLP, persists and RXT timers all are restartable 2487 * based on actions input .. i.e we received a packet (ack 2488 * or sack) and that changes things (rw, or snd_una etc). 2489 * Thus we can restart them with a new value. For 2490 * keep-alive, delayed_ack we keep track of what was left 2491 * and restart the timer with a smaller value. 2492 */ 2493 if (left < hpts_timeout) 2494 hpts_timeout = left; 2495 } 2496 if (hpts_timeout) { 2497 /* 2498 * Hack alert for now we can't time-out over 2,147,483 2499 * seconds (a bit more than 596 hours), which is probably ok 2500 * :). 2501 */ 2502 if (hpts_timeout > 0x7ffffffe) 2503 hpts_timeout = 0x7ffffffe; 2504 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 2505 } 2506 if (slot) { 2507 rack->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 2508 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) 2509 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 2510 else 2511 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 2512 rack->r_ctl.rc_last_output_to = cts + slot; 2513 if ((hpts_timeout == 0) || (hpts_timeout > slot)) { 2514 if (rack->rc_inp->inp_in_hpts == 0) 2515 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(slot)); 2516 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 2517 } else { 2518 /* 2519 * Arrange for the hpts to kick back in after the 2520 * t-o if the t-o does not cause a send. 2521 */ 2522 if (rack->rc_inp->inp_in_hpts == 0) 2523 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout)); 2524 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 2525 } 2526 } else if (hpts_timeout) { 2527 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) { 2528 /* For a rack timer, don't wake us */ 2529 rack->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 2530 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 2531 } else { 2532 /* All other timers wake us up */ 2533 rack->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 2534 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 2535 } 2536 if (rack->rc_inp->inp_in_hpts == 0) 2537 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(hpts_timeout)); 2538 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 2539 } else { 2540 /* No timer starting */ 2541 #ifdef INVARIANTS 2542 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 2543 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 2544 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 2545 } 2546 #endif 2547 } 2548 rack->rc_tmr_stopped = 0; 2549 if (slot) 2550 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, cts); 2551 } 2552 2553 /* 2554 * RACK Timer, here we simply do logging and house keeping. 2555 * the normal rack_output() function will call the 2556 * appropriate thing to check if we need to do a RACK retransmit. 2557 * We return 1, saying don't proceed with rack_output only 2558 * when all timers have been stopped (destroyed PCB?). 2559 */ 2560 static int 2561 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2562 { 2563 /* 2564 * This timer simply provides an internal trigger to send out data. 2565 * The check_recovery_mode call will see if there are needed 2566 * retransmissions, if so we will enter fast-recovery. 
The output 2567 * call may or may not do the same thing depending on sysctl 2568 * settings. 2569 */ 2570 struct rack_sendmap *rsm; 2571 int32_t recovery, ll; 2572 2573 if (tp->t_timers->tt_flags & TT_STOPPED) { 2574 return (1); 2575 } 2576 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 2577 /* Its not time yet */ 2578 return (0); 2579 } 2580 recovery = IN_RECOVERY(tp->t_flags); 2581 counter_u64_add(rack_to_tot, 1); 2582 if (rack->r_state && (rack->r_state != tp->t_state)) 2583 rack_set_state(tp, rack); 2584 rsm = rack_check_recovery_mode(tp, cts); 2585 if (rsm) 2586 ll = rsm->r_end - rsm->r_start; 2587 else 2588 ll = 0; 2589 rack_log_to_event(rack, RACK_TO_FRM_RACK, ll); 2590 if (rsm) { 2591 uint32_t rtt; 2592 2593 rtt = rack->rc_rack_rtt; 2594 if (rtt == 0) 2595 rtt = 1; 2596 if ((recovery == 0) && 2597 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 2598 /* 2599 * The rack-timeout that enter's us into recovery 2600 * will force out one MSS and set us up so that we 2601 * can do one more send in 2*rtt (transitioning the 2602 * rack timeout into a rack-tlp). 2603 */ 2604 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 2605 rack_log_to_prr(rack, 3); 2606 } else if ((rack->r_ctl.rc_prr_sndcnt < (rsm->r_end - rsm->r_start)) && 2607 rack->use_rack_cheat) { 2608 /* 2609 * When a rack timer goes, if the rack cheat is 2610 * on, arrange it so we can send a full segment. 2611 */ 2612 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 2613 rack_log_to_prr(rack, 4); 2614 } 2615 } else { 2616 /* This is a case that should happen rarely if ever */ 2617 counter_u64_add(rack_tlp_does_nada, 1); 2618 #ifdef TCP_BLACKBOX 2619 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2620 #endif 2621 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2622 } 2623 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 2624 return (0); 2625 } 2626 2627 static __inline void 2628 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 2629 struct rack_sendmap *rsm, uint32_t start) 2630 { 2631 int idx; 2632 2633 nrsm->r_start = start; 2634 nrsm->r_end = rsm->r_end; 2635 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 2636 nrsm->r_flags = rsm->r_flags; 2637 nrsm->r_dupack = rsm->r_dupack; 2638 nrsm->r_rtr_bytes = 0; 2639 rsm->r_end = nrsm->r_start; 2640 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 2641 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 2642 } 2643 } 2644 2645 static struct rack_sendmap * 2646 rack_merge_rsm(struct tcp_rack *rack, 2647 struct rack_sendmap *l_rsm, 2648 struct rack_sendmap *r_rsm) 2649 { 2650 /* 2651 * We are merging two ack'd RSM's, 2652 * the l_rsm is on the left (lower seq 2653 * values) and the r_rsm is on the right 2654 * (higher seq value). The simplest way 2655 * to merge these is to move the right 2656 * one into the left. I don't think there 2657 * is any reason we need to try to find 2658 * the oldest (or last oldest retransmitted). 
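 * Added example (illustrative sequence numbers): merging an acked
 * entry covering [1000, 2000) with its acked right neighbour covering
 * [2000, 3000) simply extends the left entry to [1000, 3000); the
 * right entry is then removed from the RB tree and handed to
 * rack_free().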
2659 */ 2660 struct rack_sendmap *rm; 2661 2662 l_rsm->r_end = r_rsm->r_end; 2663 if (l_rsm->r_dupack < r_rsm->r_dupack) 2664 l_rsm->r_dupack = r_rsm->r_dupack; 2665 if (r_rsm->r_rtr_bytes) 2666 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 2667 if (r_rsm->r_in_tmap) { 2668 /* This really should not happen */ 2669 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 2670 r_rsm->r_in_tmap = 0; 2671 } 2672 /* Now the flags */ 2673 if (r_rsm->r_flags & RACK_HAS_FIN) 2674 l_rsm->r_flags |= RACK_HAS_FIN; 2675 if (r_rsm->r_flags & RACK_TLP) 2676 l_rsm->r_flags |= RACK_TLP; 2677 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 2678 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 2679 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 2680 #ifdef INVARIANTS 2681 if (rm != r_rsm) { 2682 panic("removing head in rack:%p rsm:%p rm:%p", 2683 rack, r_rsm, rm); 2684 } 2685 #endif 2686 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 2687 /* Transfer the split limit to the map we free */ 2688 r_rsm->r_limit_type = l_rsm->r_limit_type; 2689 l_rsm->r_limit_type = 0; 2690 } 2691 rack_free(rack, r_rsm); 2692 return(l_rsm); 2693 } 2694 2695 /* 2696 * TLP Timer, here we simply setup what segment we want to 2697 * have the TLP expire on, the normal rack_output() will then 2698 * send it out. 2699 * 2700 * We return 1, saying don't proceed with rack_output only 2701 * when all timers have been stopped (destroyed PCB?). 2702 */ 2703 static int 2704 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2705 { 2706 /* 2707 * Tail Loss Probe. 2708 */ 2709 struct rack_sendmap *rsm = NULL; 2710 struct rack_sendmap *insret; 2711 struct socket *so; 2712 uint32_t amm, old_prr_snd = 0; 2713 uint32_t out, avail; 2714 int collapsed_win = 0; 2715 2716 if (tp->t_timers->tt_flags & TT_STOPPED) { 2717 return (1); 2718 } 2719 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 2720 /* Its not time yet */ 2721 return (0); 2722 } 2723 if (rack_progress_timeout_check(tp)) { 2724 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 2725 return (1); 2726 } 2727 /* 2728 * A TLP timer has expired. We have been idle for 2 rtts. So we now 2729 * need to figure out how to force a full MSS segment out. 2730 */ 2731 rack_log_to_event(rack, RACK_TO_FRM_TLP, 0); 2732 counter_u64_add(rack_tlp_tot, 1); 2733 if (rack->r_state && (rack->r_state != tp->t_state)) 2734 rack_set_state(tp, rack); 2735 so = tp->t_inpcb->inp_socket; 2736 #ifdef KERN_TLS 2737 if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) { 2738 /* 2739 * For hardware TLS we do *not* want to send 2740 * new data, lets instead just do a retransmission. 2741 */ 2742 goto need_retran; 2743 } 2744 #endif 2745 avail = sbavail(&so->so_snd); 2746 out = tp->snd_max - tp->snd_una; 2747 rack->tlp_timer_up = 1; 2748 if (out > tp->snd_wnd) { 2749 /* special case, we need a retransmission */ 2750 collapsed_win = 1; 2751 goto need_retran; 2752 } 2753 /* 2754 * If we are in recovery we can jazz out a segment if new data is 2755 * present simply by setting rc_prr_sndcnt to a segment. 
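 * Added example (illustrative): if the socket buffer holds at least
 * one more MSS of un-sent data than is outstanding and it fits in the
 * peer's window, the code below sets rc_prr_sndcnt (in recovery) or
 * rc_tlp_new_data (outside recovery) to one MSS, so rack_output()
 * probes with a single segment of new data instead of a
 * retransmission.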
2756 */ 2757 if ((avail > out) && 2758 ((rack_always_send_oldest == 0) || (TAILQ_EMPTY(&rack->r_ctl.rc_tmap)))) { 2759 /* New data is available */ 2760 amm = avail - out; 2761 if (amm > ctf_fixed_maxseg(tp)) { 2762 amm = ctf_fixed_maxseg(tp); 2763 } else if ((amm < ctf_fixed_maxseg(tp)) && ((tp->t_flags & TF_NODELAY) == 0)) { 2764 /* not enough to fill a MTU and no-delay is off */ 2765 goto need_retran; 2766 } 2767 if (IN_RECOVERY(tp->t_flags)) { 2768 /* Unlikely */ 2769 old_prr_snd = rack->r_ctl.rc_prr_sndcnt; 2770 if (out + amm <= tp->snd_wnd) { 2771 rack->r_ctl.rc_prr_sndcnt = amm; 2772 rack_log_to_prr(rack, 4); 2773 } else 2774 goto need_retran; 2775 } else { 2776 /* Set the send-new override */ 2777 if (out + amm <= tp->snd_wnd) 2778 rack->r_ctl.rc_tlp_new_data = amm; 2779 else 2780 goto need_retran; 2781 } 2782 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 2783 rack->r_ctl.rc_last_tlp_seq = tp->snd_max; 2784 rack->r_ctl.rc_tlpsend = NULL; 2785 counter_u64_add(rack_tlp_newdata, 1); 2786 goto send; 2787 } 2788 need_retran: 2789 /* 2790 * Ok we need to arrange the last un-acked segment to be re-sent, or 2791 * optionally the first un-acked segment. 2792 */ 2793 if (collapsed_win == 0) { 2794 if (rack_always_send_oldest) 2795 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 2796 else { 2797 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 2798 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 2799 rsm = rack_find_high_nonack(rack, rsm); 2800 } 2801 } 2802 if (rsm == NULL) { 2803 counter_u64_add(rack_tlp_does_nada, 1); 2804 #ifdef TCP_BLACKBOX 2805 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2806 #endif 2807 goto out; 2808 } 2809 } else { 2810 /* 2811 * We must find the last segment 2812 * that was acceptable by the client. 2813 */ 2814 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 2815 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 2816 /* Found one */ 2817 break; 2818 } 2819 } 2820 if (rsm == NULL) { 2821 /* None? if so send the first */ 2822 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 2823 if (rsm == NULL) { 2824 counter_u64_add(rack_tlp_does_nada, 1); 2825 #ifdef TCP_BLACKBOX 2826 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 2827 #endif 2828 goto out; 2829 } 2830 } 2831 } 2832 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 2833 /* 2834 * We need to split this the last segment in two. 2835 */ 2836 struct rack_sendmap *nrsm; 2837 2838 2839 nrsm = rack_alloc_full_limit(rack); 2840 if (nrsm == NULL) { 2841 /* 2842 * No memory to split, we will just exit and punt 2843 * off to the RXT timer. 
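 * Added note (illustrative numbers): when the allocation does
 * succeed, rack_clone_rsm() below carves the last MSS off the tail,
 * e.g. an entry covering [1000, 3920) with a 1460-byte MSS becomes
 * [1000, 2460) plus a new entry for [2460, 3920), and only that last
 * segment's worth of data is probed.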
2844 */ 2845 counter_u64_add(rack_tlp_does_nada, 1); 2846 goto out; 2847 } 2848 rack_clone_rsm(rack, nrsm, rsm, 2849 (rsm->r_end - ctf_fixed_maxseg(tp))); 2850 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 2851 #ifdef INVARIANTS 2852 if (insret != NULL) { 2853 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 2854 nrsm, insret, rack, rsm); 2855 } 2856 #endif 2857 if (rsm->r_in_tmap) { 2858 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 2859 nrsm->r_in_tmap = 1; 2860 } 2861 rsm->r_flags &= (~RACK_HAS_FIN); 2862 rsm = nrsm; 2863 } 2864 rack->r_ctl.rc_tlpsend = rsm; 2865 rack->r_ctl.rc_tlp_rtx_out = 1; 2866 if (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) { 2867 rack->r_ctl.rc_tlp_seg_send_cnt++; 2868 tp->t_rxtshift++; 2869 } else { 2870 rack->r_ctl.rc_last_tlp_seq = rsm->r_start; 2871 rack->r_ctl.rc_tlp_seg_send_cnt = 1; 2872 } 2873 send: 2874 rack->r_ctl.rc_tlp_send_cnt++; 2875 if (rack->r_ctl.rc_tlp_send_cnt > rack_tlp_max_resend) { 2876 /* 2877 * Can't [re]/transmit a segment we have not heard from the 2878 * peer in max times. We need the retransmit timer to take 2879 * over. 2880 */ 2881 restore: 2882 rack->r_ctl.rc_tlpsend = NULL; 2883 if (rsm) 2884 rsm->r_flags &= ~RACK_TLP; 2885 rack->r_ctl.rc_prr_sndcnt = old_prr_snd; 2886 rack_log_to_prr(rack, 5); 2887 counter_u64_add(rack_tlp_retran_fail, 1); 2888 goto out; 2889 } else if (rsm) { 2890 rsm->r_flags |= RACK_TLP; 2891 } 2892 if (rsm && (rsm->r_start == rack->r_ctl.rc_last_tlp_seq) && 2893 (rack->r_ctl.rc_tlp_seg_send_cnt > rack_tlp_max_resend)) { 2894 /* 2895 * We don't want to send a single segment more than the max 2896 * either. 2897 */ 2898 goto restore; 2899 } 2900 rack->r_timer_override = 1; 2901 rack->r_tlp_running = 1; 2902 rack->rc_tlp_in_progress = 1; 2903 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 2904 return (0); 2905 out: 2906 rack->tlp_timer_up = 0; 2907 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 2908 return (0); 2909 } 2910 2911 /* 2912 * Delayed ack Timer, here we simply need to setup the 2913 * ACK_NOW flag and remove the DELACK flag. From there 2914 * the output routine will send the ack out. 2915 * 2916 * We only return 1, saying don't proceed, if all timers 2917 * are stopped (destroyed PCB?). 2918 */ 2919 static int 2920 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2921 { 2922 if (tp->t_timers->tt_flags & TT_STOPPED) { 2923 return (1); 2924 } 2925 rack_log_to_event(rack, RACK_TO_FRM_DELACK, 0); 2926 tp->t_flags &= ~TF_DELACK; 2927 tp->t_flags |= TF_ACKNOW; 2928 TCPSTAT_INC(tcps_delack); 2929 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 2930 return (0); 2931 } 2932 2933 /* 2934 * Persists timer, here we simply need to setup the 2935 * FORCE-DATA flag the output routine will send 2936 * the one byte send. 2937 * 2938 * We only return 1, saying don't proceed, if all timers 2939 * are stopped (destroyed PCB?). 2940 */ 2941 static int 2942 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 2943 { 2944 struct tcptemp *t_template; 2945 struct inpcb *inp; 2946 int32_t retval = 1; 2947 2948 inp = tp->t_inpcb; 2949 2950 if (tp->t_timers->tt_flags & TT_STOPPED) { 2951 return (1); 2952 } 2953 if (rack->rc_in_persist == 0) 2954 return (0); 2955 if (rack_progress_timeout_check(tp)) { 2956 tcp_set_inp_to_drop(inp, ETIMEDOUT); 2957 return (1); 2958 } 2959 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 2960 /* 2961 * Persistence timer into zero window. Force a byte to be output, if 2962 * possible. 
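 * Added note (not part of the original comment): the probe below is
 * built from a template and sent with tcp_respond() using sequence
 * number snd_una - 1, which obliges the peer to answer with an ACK
 * advertising its current window.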
2963 */ 2964 TCPSTAT_INC(tcps_persisttimeo); 2965 /* 2966 * Hack: if the peer is dead/unreachable, we do not time out if the 2967 * window is closed. After a full backoff, drop the connection if 2968 * the idle time (no responses to probes) reaches the maximum 2969 * backoff that we would use if retransmitting. 2970 */ 2971 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 2972 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 2973 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { 2974 TCPSTAT_INC(tcps_persistdrop); 2975 retval = 1; 2976 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 2977 goto out; 2978 } 2979 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 2980 tp->snd_una == tp->snd_max) 2981 rack_exit_persist(tp, rack); 2982 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 2983 /* 2984 * If the user has closed the socket then drop a persisting 2985 * connection after a much reduced timeout. 2986 */ 2987 if (tp->t_state > TCPS_CLOSE_WAIT && 2988 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 2989 retval = 1; 2990 TCPSTAT_INC(tcps_persistdrop); 2991 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 2992 goto out; 2993 } 2994 t_template = tcpip_maketemplate(rack->rc_inp); 2995 if (t_template) { 2996 tcp_respond(tp, t_template->tt_ipgen, 2997 &t_template->tt_t, (struct mbuf *)NULL, 2998 tp->rcv_nxt, tp->snd_una - 1, 0); 2999 /* This sends an ack */ 3000 if (tp->t_flags & TF_DELACK) 3001 tp->t_flags &= ~TF_DELACK; 3002 free(t_template, M_TEMP); 3003 } 3004 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 3005 tp->t_rxtshift++; 3006 out: 3007 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, 0); 3008 rack_start_hpts_timer(rack, tp, cts, 3009 0, 0, 0); 3010 return (retval); 3011 } 3012 3013 /* 3014 * If a keepalive goes off, we had no other timers 3015 * happening. We always return 1 here since this 3016 * routine either drops the connection or sends 3017 * out a segment with respond. 3018 */ 3019 static int 3020 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 3021 { 3022 struct tcptemp *t_template; 3023 struct inpcb *inp; 3024 3025 if (tp->t_timers->tt_flags & TT_STOPPED) { 3026 return (1); 3027 } 3028 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 3029 inp = tp->t_inpcb; 3030 rack_log_to_event(rack, RACK_TO_FRM_KEEP, 0); 3031 /* 3032 * Keep-alive timer went off; send something or drop connection if 3033 * idle for too long. 3034 */ 3035 TCPSTAT_INC(tcps_keeptimeo); 3036 if (tp->t_state < TCPS_ESTABLISHED) 3037 goto dropit; 3038 if ((tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 3039 tp->t_state <= TCPS_CLOSING) { 3040 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 3041 goto dropit; 3042 /* 3043 * Send a packet designed to force a response if the peer is 3044 * up and reachable: either an ACK if the connection is 3045 * still alive, or an RST if the peer has closed the 3046 * connection due to timeout or reboot. Using sequence 3047 * number tp->snd_una-1 causes the transmitted zero-length 3048 * segment to lie outside the receive window; by the 3049 * protocol spec, this requires the correspondent TCP to 3050 * respond. 
3051 */ 3052 TCPSTAT_INC(tcps_keepprobe); 3053 t_template = tcpip_maketemplate(inp); 3054 if (t_template) { 3055 tcp_respond(tp, t_template->tt_ipgen, 3056 &t_template->tt_t, (struct mbuf *)NULL, 3057 tp->rcv_nxt, tp->snd_una - 1, 0); 3058 free(t_template, M_TEMP); 3059 } 3060 } 3061 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 3062 return (1); 3063 dropit: 3064 TCPSTAT_INC(tcps_keepdrops); 3065 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 3066 return (1); 3067 } 3068 3069 /* 3070 * Retransmit helper function, clear up all the ack 3071 * flags and take care of important book keeping. 3072 */ 3073 static void 3074 rack_remxt_tmr(struct tcpcb *tp) 3075 { 3076 /* 3077 * The retransmit timer went off, all sack'd blocks must be 3078 * un-acked. 3079 */ 3080 struct rack_sendmap *rsm, *trsm = NULL; 3081 struct tcp_rack *rack; 3082 int32_t cnt = 0; 3083 3084 rack = (struct tcp_rack *)tp->t_fb_ptr; 3085 rack_timer_cancel(tp, rack, tcp_ts_getticks(), __LINE__); 3086 rack_log_to_event(rack, RACK_TO_FRM_TMR, 0); 3087 if (rack->r_state && (rack->r_state != tp->t_state)) 3088 rack_set_state(tp, rack); 3089 /* 3090 * Ideally we would like to be able to 3091 * mark SACK-PASS on anything not acked here. 3092 * However, if we do that we would burst out 3093 * all that data 1ms apart. This would be unwise, 3094 * so for now we will just let the normal rxt timer 3095 * and tlp timer take care of it. 3096 */ 3097 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 3098 if (rsm->r_flags & RACK_ACKED) { 3099 cnt++; 3100 rsm->r_dupack = 0; 3101 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 3102 if (rsm->r_in_tmap == 0) { 3103 /* We must re-add it back to the tlist */ 3104 if (trsm == NULL) { 3105 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3106 } else { 3107 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 3108 } 3109 rsm->r_in_tmap = 1; 3110 } 3111 } 3112 trsm = rsm; 3113 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 3114 } 3115 /* Clear the count (we just un-acked them) */ 3116 rack->r_ctl.rc_sacked = 0; 3117 /* Clear the tlp rtx mark */ 3118 rack->r_ctl.rc_tlp_rtx_out = 0; 3119 rack->r_ctl.rc_tlp_seg_send_cnt = 0; 3120 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3121 rack->r_ctl.rc_prr_sndcnt = 0; 3122 rack_log_to_prr(rack, 6); 3123 rack->r_timer_override = 1; 3124 } 3125 3126 /* 3127 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 3128 * we will setup to retransmit the lowest seq number outstanding. 3129 */ 3130 static int 3131 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 3132 { 3133 int32_t rexmt; 3134 struct inpcb *inp; 3135 int32_t retval = 0; 3136 3137 inp = tp->t_inpcb; 3138 if (tp->t_timers->tt_flags & TT_STOPPED) { 3139 return (1); 3140 } 3141 if (rack_progress_timeout_check(tp)) { 3142 tcp_set_inp_to_drop(inp, ETIMEDOUT); 3143 return (1); 3144 } 3145 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 3146 if (TCPS_HAVEESTABLISHED(tp->t_state) && 3147 (tp->snd_una == tp->snd_max)) { 3148 /* Nothing outstanding .. nothing to do */ 3149 return (0); 3150 } 3151 /* 3152 * Retransmission timer went off. Message has not been acked within 3153 * retransmit interval. Back off to a longer retransmit interval 3154 * and retransmit one segment. 
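 * Worked example (added; illustrative numbers): if the current
 * retransmit value works out to 400 ms and t_rxtshift has stepped to
 * 3, the backed-off interval is 400 * tcp_backoff[3] = 3.2 seconds
 * (tcp_backoff[] doubles per shift up to a factor of 64), and the
 * TCPT_RANGESET() below keeps the result at or below rack_rto_max.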
3155 */ 3156 rack_remxt_tmr(tp); 3157 if ((rack->r_ctl.rc_resend == NULL) || 3158 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 3159 /* 3160 * If the rwnd collapsed on 3161 * the one we are retransmitting 3162 * it does not count against the 3163 * rxt count. 3164 */ 3165 tp->t_rxtshift++; 3166 } 3167 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 3168 tp->t_rxtshift = TCP_MAXRXTSHIFT; 3169 TCPSTAT_INC(tcps_timeoutdrop); 3170 retval = 1; 3171 tcp_set_inp_to_drop(rack->rc_inp, 3172 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 3173 goto out; 3174 } 3175 if (tp->t_state == TCPS_SYN_SENT) { 3176 /* 3177 * If the SYN was retransmitted, indicate CWND to be limited 3178 * to 1 segment in cc_conn_init(). 3179 */ 3180 tp->snd_cwnd = 1; 3181 } else if (tp->t_rxtshift == 1) { 3182 /* 3183 * first retransmit; record ssthresh and cwnd so they can be 3184 * recovered if this turns out to be a "bad" retransmit. A 3185 * retransmit is considered "bad" if an ACK for this segment 3186 * is received within RTT/2 interval; the assumption here is 3187 * that the ACK was already in flight. See "On Estimating 3188 * End-to-End Network Path Properties" by Allman and Paxson 3189 * for more details. 3190 */ 3191 tp->snd_cwnd_prev = tp->snd_cwnd; 3192 tp->snd_ssthresh_prev = tp->snd_ssthresh; 3193 tp->snd_recover_prev = tp->snd_recover; 3194 if (IN_FASTRECOVERY(tp->t_flags)) 3195 tp->t_flags |= TF_WASFRECOVERY; 3196 else 3197 tp->t_flags &= ~TF_WASFRECOVERY; 3198 if (IN_CONGRECOVERY(tp->t_flags)) 3199 tp->t_flags |= TF_WASCRECOVERY; 3200 else 3201 tp->t_flags &= ~TF_WASCRECOVERY; 3202 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); 3203 tp->t_flags |= TF_PREVVALID; 3204 } else 3205 tp->t_flags &= ~TF_PREVVALID; 3206 TCPSTAT_INC(tcps_rexmttimeo); 3207 if ((tp->t_state == TCPS_SYN_SENT) || 3208 (tp->t_state == TCPS_SYN_RECEIVED)) 3209 rexmt = MSEC_2_TICKS(RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]); 3210 else 3211 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; 3212 TCPT_RANGESET(tp->t_rxtcur, rexmt, 3213 max(MSEC_2_TICKS(rack_rto_min), rexmt), 3214 MSEC_2_TICKS(rack_rto_max)); 3215 /* 3216 * We enter the path for PLMTUD if connection is established or, if 3217 * connection is FIN_WAIT_1 status, reason for the last is that if 3218 * amount of data we send is very small, we could send it in couple 3219 * of packets and process straight to FIN. In that case we won't 3220 * catch ESTABLISHED state. 3221 */ 3222 if (V_tcp_pmtud_blackhole_detect && (((tp->t_state == TCPS_ESTABLISHED)) 3223 || (tp->t_state == TCPS_FIN_WAIT_1))) { 3224 #ifdef INET6 3225 int32_t isipv6; 3226 #endif 3227 3228 /* 3229 * Idea here is that at each stage of mtu probe (usually, 3230 * 1448 -> 1188 -> 524) should be given 2 chances to recover 3231 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 3232 * should take care of that. 3233 */ 3234 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 3235 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 3236 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 3237 tp->t_rxtshift % 2 == 0)) { 3238 /* 3239 * Enter Path MTU Black-hole Detection mechanism: - 3240 * Disable Path MTU Discovery (IP "DF" bit). - 3241 * Reduce MTU to lower value than what we negotiated 3242 * with peer. 3243 */ 3244 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 3245 /* Record that we may have found a black hole. */ 3246 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 3247 /* Keep track of previous MSS. 
*/ 3248 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 3249 } 3250 3251 /* 3252 * Reduce the MSS to blackhole value or to the 3253 * default in an attempt to retransmit. 3254 */ 3255 #ifdef INET6 3256 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0; 3257 if (isipv6 && 3258 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 3259 /* Use the sysctl tuneable blackhole MSS. */ 3260 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 3261 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 3262 } else if (isipv6) { 3263 /* Use the default MSS. */ 3264 tp->t_maxseg = V_tcp_v6mssdflt; 3265 /* 3266 * Disable Path MTU Discovery when we switch 3267 * to minmss. 3268 */ 3269 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 3270 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 3271 } 3272 #endif 3273 #if defined(INET6) && defined(INET) 3274 else 3275 #endif 3276 #ifdef INET 3277 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 3278 /* Use the sysctl tuneable blackhole MSS. */ 3279 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 3280 TCPSTAT_INC(tcps_pmtud_blackhole_activated); 3281 } else { 3282 /* Use the default MSS. */ 3283 tp->t_maxseg = V_tcp_mssdflt; 3284 /* 3285 * Disable Path MTU Discovery when we switch 3286 * to minmss. 3287 */ 3288 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 3289 TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 3290 } 3291 #endif 3292 } else { 3293 /* 3294 * If further retransmissions are still unsuccessful 3295 * with a lowered MTU, maybe this isn't a blackhole 3296 * and we restore the previous MSS and blackhole 3297 * detection flags. The limit '6' is determined by 3298 * giving each probe stage (1448, 1188, 524) 2 3299 * chances to recover. 3300 */ 3301 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 3302 (tp->t_rxtshift >= 6)) { 3303 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 3304 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 3305 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 3306 TCPSTAT_INC(tcps_pmtud_blackhole_failed); 3307 } 3308 } 3309 } 3310 /* 3311 * If we backed off this far, our srtt estimate is probably bogus. 3312 * Clobber it so we'll take the next rtt measurement as our srtt; 3313 * move the current srtt into rttvar to keep the current retransmit 3314 * times until then. 
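 * Added note (not part of the original comment): TCP_MAXRXTSHIFT is
 * 12, so this srtt reset (and the in_losing()/in6_losing() call just
 * below) only happens once four or more consecutive retransmission
 * timeouts have backed the connection off.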
3315 */ 3316 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 3317 #ifdef INET6 3318 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 3319 in6_losing(tp->t_inpcb); 3320 else 3321 #endif 3322 in_losing(tp->t_inpcb); 3323 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); 3324 tp->t_srtt = 0; 3325 } 3326 if (rack_use_sack_filter) 3327 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 3328 tp->snd_recover = tp->snd_max; 3329 tp->t_flags |= TF_ACKNOW; 3330 tp->t_rtttime = 0; 3331 rack_cong_signal(tp, NULL, CC_RTO); 3332 out: 3333 return (retval); 3334 } 3335 3336 static int 3337 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling) 3338 { 3339 int32_t ret = 0; 3340 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 3341 3342 if (timers == 0) { 3343 return (0); 3344 } 3345 if (tp->t_state == TCPS_LISTEN) { 3346 /* no timers on listen sockets */ 3347 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 3348 return (0); 3349 return (1); 3350 } 3351 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 3352 uint32_t left; 3353 3354 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 3355 ret = -1; 3356 rack_log_to_processing(rack, cts, ret, 0); 3357 return (0); 3358 } 3359 if (hpts_calling == 0) { 3360 ret = -2; 3361 rack_log_to_processing(rack, cts, ret, 0); 3362 return (0); 3363 } 3364 /* 3365 * Ok our timer went off early and we are not paced false 3366 * alarm, go back to sleep. 3367 */ 3368 ret = -3; 3369 left = rack->r_ctl.rc_timer_exp - cts; 3370 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 3371 rack_log_to_processing(rack, cts, ret, left); 3372 rack->rc_last_pto_set = 0; 3373 return (1); 3374 } 3375 rack->rc_tmr_stopped = 0; 3376 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 3377 if (timers & PACE_TMR_DELACK) { 3378 ret = rack_timeout_delack(tp, rack, cts); 3379 } else if (timers & PACE_TMR_RACK) { 3380 rack->r_ctl.rc_tlp_rxt_last_time = cts; 3381 ret = rack_timeout_rack(tp, rack, cts); 3382 } else if (timers & PACE_TMR_TLP) { 3383 rack->r_ctl.rc_tlp_rxt_last_time = cts; 3384 ret = rack_timeout_tlp(tp, rack, cts); 3385 } else if (timers & PACE_TMR_RXT) { 3386 rack->r_ctl.rc_tlp_rxt_last_time = cts; 3387 ret = rack_timeout_rxt(tp, rack, cts); 3388 } else if (timers & PACE_TMR_PERSIT) { 3389 ret = rack_timeout_persist(tp, rack, cts); 3390 } else if (timers & PACE_TMR_KEEP) { 3391 ret = rack_timeout_keepalive(tp, rack, cts); 3392 } 3393 rack_log_to_processing(rack, cts, ret, timers); 3394 return (ret); 3395 } 3396 3397 static void 3398 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 3399 { 3400 uint8_t hpts_removed = 0; 3401 3402 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 3403 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 3404 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 3405 hpts_removed = 1; 3406 } 3407 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 3408 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 3409 if (rack->rc_inp->inp_in_hpts && 3410 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 3411 /* 3412 * Canceling timer's when we have no output being 3413 * paced. We also must remove ourselves from the 3414 * hpts. 
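 * Otherwise the hpts wheel would keep the inp queued and call back
 * into a connection that no longer has a timer or pacing slot
 * pending.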
3415 */ 3416 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 3417 hpts_removed = 1; 3418 } 3419 rack_log_to_cancel(rack, hpts_removed, line); 3420 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 3421 } 3422 } 3423 3424 static void 3425 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 3426 { 3427 return; 3428 } 3429 3430 static int 3431 rack_stopall(struct tcpcb *tp) 3432 { 3433 struct tcp_rack *rack; 3434 rack = (struct tcp_rack *)tp->t_fb_ptr; 3435 rack->t_timers_stopped = 1; 3436 return (0); 3437 } 3438 3439 static void 3440 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 3441 { 3442 return; 3443 } 3444 3445 static int 3446 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 3447 { 3448 return (0); 3449 } 3450 3451 static void 3452 rack_stop_all_timers(struct tcpcb *tp) 3453 { 3454 struct tcp_rack *rack; 3455 3456 /* 3457 * Assure no timers are running. 3458 */ 3459 if (tcp_timer_active(tp, TT_PERSIST)) { 3460 /* We enter in persists, set the flag appropriately */ 3461 rack = (struct tcp_rack *)tp->t_fb_ptr; 3462 rack->rc_in_persist = 1; 3463 } 3464 tcp_timer_suspend(tp, TT_PERSIST); 3465 tcp_timer_suspend(tp, TT_REXMT); 3466 tcp_timer_suspend(tp, TT_KEEP); 3467 tcp_timer_suspend(tp, TT_DELACK); 3468 } 3469 3470 static void 3471 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 3472 struct rack_sendmap *rsm, uint32_t ts) 3473 { 3474 int32_t idx; 3475 3476 rsm->r_rtr_cnt++; 3477 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 3478 rsm->r_dupack = 0; 3479 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 3480 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 3481 rsm->r_flags |= RACK_OVERMAX; 3482 } 3483 if ((rsm->r_rtr_cnt > 1) && (rack->r_tlp_running == 0)) { 3484 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 3485 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 3486 } 3487 idx = rsm->r_rtr_cnt - 1; 3488 rsm->r_tim_lastsent[idx] = ts; 3489 if (rsm->r_flags & RACK_ACKED) { 3490 /* Problably MTU discovery messing with us */ 3491 rsm->r_flags &= ~RACK_ACKED; 3492 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 3493 } 3494 if (rsm->r_in_tmap) { 3495 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3496 rsm->r_in_tmap = 0; 3497 } 3498 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3499 rsm->r_in_tmap = 1; 3500 if (rsm->r_flags & RACK_SACK_PASSED) { 3501 /* We have retransmitted due to the SACK pass */ 3502 rsm->r_flags &= ~RACK_SACK_PASSED; 3503 rsm->r_flags |= RACK_WAS_SACKPASS; 3504 } 3505 } 3506 3507 3508 static uint32_t 3509 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 3510 struct rack_sendmap *rsm, uint32_t ts, int32_t *lenp) 3511 { 3512 /* 3513 * We (re-)transmitted starting at rsm->r_start for some length 3514 * (possibly less than r_end. 3515 */ 3516 struct rack_sendmap *nrsm, *insret; 3517 uint32_t c_end; 3518 int32_t len; 3519 3520 len = *lenp; 3521 c_end = rsm->r_start + len; 3522 if (SEQ_GEQ(c_end, rsm->r_end)) { 3523 /* 3524 * We retransmitted the whole piece or more than the whole 3525 * slopping into the next rsm. 3526 */ 3527 rack_update_rsm(tp, rack, rsm, ts); 3528 if (c_end == rsm->r_end) { 3529 *lenp = 0; 3530 return (0); 3531 } else { 3532 int32_t act_len; 3533 3534 /* Hangs over the end return whats left */ 3535 act_len = rsm->r_end - rsm->r_start; 3536 *lenp = (len - act_len); 3537 return (rsm->r_end); 3538 } 3539 /* We don't get out of this block. 
*/ 3540 } 3541 /* 3542 * Here we retransmitted less than the whole thing which means we 3543 * have to split this into what was transmitted and what was not. 3544 */ 3545 nrsm = rack_alloc_full_limit(rack); 3546 if (nrsm == NULL) { 3547 /* 3548 * We can't get memory, so lets not proceed. 3549 */ 3550 *lenp = 0; 3551 return (0); 3552 } 3553 /* 3554 * So here we are going to take the original rsm and make it what we 3555 * retransmitted. nrsm will be the tail portion we did not 3556 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 3557 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 3558 * 1, 6 and the new piece will be 6, 11. 3559 */ 3560 rack_clone_rsm(rack, nrsm, rsm, c_end); 3561 nrsm->r_dupack = 0; 3562 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 3563 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 3564 #ifdef INVARIANTS 3565 if (insret != NULL) { 3566 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 3567 nrsm, insret, rack, rsm); 3568 } 3569 #endif 3570 if (rsm->r_in_tmap) { 3571 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 3572 nrsm->r_in_tmap = 1; 3573 } 3574 rsm->r_flags &= (~RACK_HAS_FIN); 3575 rack_update_rsm(tp, rack, rsm, ts); 3576 *lenp = 0; 3577 return (0); 3578 } 3579 3580 3581 static void 3582 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 3583 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t ts, 3584 uint8_t pass, struct rack_sendmap *hintrsm) 3585 { 3586 struct tcp_rack *rack; 3587 struct rack_sendmap *rsm, *nrsm, *insret, fe; 3588 register uint32_t snd_max, snd_una; 3589 3590 /* 3591 * Add to the RACK log of packets in flight or retransmitted. If 3592 * there is a TS option we will use the TS echoed, if not we will 3593 * grab a TS. 3594 * 3595 * Retransmissions will increment the count and move the ts to its 3596 * proper place. Note that if options do not include TS's then we 3597 * won't be able to effectively use the ACK for an RTT on a retran. 3598 * 3599 * Notes about r_start and r_end. Lets consider a send starting at 3600 * sequence 1 for 10 bytes. In such an example the r_start would be 3601 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 3602 * This means that r_end is actually the first sequence for the next 3603 * slot (11). 3604 * 3605 */ 3606 /* 3607 * If err is set what do we do XXXrrs? should we not add the thing? 3608 * -- i.e. return if err != 0 or should we pretend we sent it? -- 3609 * i.e. proceed with add ** do this for now. 3610 */ 3611 INP_WLOCK_ASSERT(tp->t_inpcb); 3612 if (err) 3613 /* 3614 * We don't log errors -- we could but snd_max does not 3615 * advance in this case either. 3616 */ 3617 return; 3618 3619 if (th_flags & TH_RST) { 3620 /* 3621 * We don't log resets and we return immediately from 3622 * sending 3623 */ 3624 return; 3625 } 3626 rack = (struct tcp_rack *)tp->t_fb_ptr; 3627 snd_una = tp->snd_una; 3628 if (SEQ_LEQ((seq_out + len), snd_una)) { 3629 /* Are sending an old segment to induce an ack (keep-alive)? */ 3630 return; 3631 } 3632 if (SEQ_LT(seq_out, snd_una)) { 3633 /* huh? should we panic? */ 3634 uint32_t end; 3635 3636 end = seq_out + len; 3637 seq_out = snd_una; 3638 if (SEQ_GEQ(end, seq_out)) 3639 len = end - seq_out; 3640 else 3641 len = 0; 3642 } 3643 snd_max = tp->snd_max; 3644 if (th_flags & (TH_SYN | TH_FIN)) { 3645 /* 3646 * The call to rack_log_output is made before bumping 3647 * snd_max. 
This means we can record one extra byte on a SYN 3648 * or FIN if seq_out is adding more on and a FIN is present 3649 * (and we are not resending). 3650 */ 3651 if (th_flags & TH_SYN) 3652 len++; 3653 if (th_flags & TH_FIN) 3654 len++; 3655 if (SEQ_LT(snd_max, tp->snd_nxt)) { 3656 /* 3657 * The add/update as not been done for the FIN/SYN 3658 * yet. 3659 */ 3660 snd_max = tp->snd_nxt; 3661 } 3662 } 3663 if (len == 0) { 3664 /* We don't log zero window probes */ 3665 return; 3666 } 3667 rack->r_ctl.rc_time_last_sent = ts; 3668 if (IN_RECOVERY(tp->t_flags)) { 3669 rack->r_ctl.rc_prr_out += len; 3670 } 3671 /* First question is it a retransmission or new? */ 3672 if (seq_out == snd_max) { 3673 /* Its new */ 3674 again: 3675 rsm = rack_alloc(rack); 3676 if (rsm == NULL) { 3677 /* 3678 * Hmm out of memory and the tcb got destroyed while 3679 * we tried to wait. 3680 */ 3681 return; 3682 } 3683 if (th_flags & TH_FIN) { 3684 rsm->r_flags = RACK_HAS_FIN; 3685 } else { 3686 rsm->r_flags = 0; 3687 } 3688 rsm->r_tim_lastsent[0] = ts; 3689 rsm->r_rtr_cnt = 1; 3690 rsm->r_rtr_bytes = 0; 3691 if (th_flags & TH_SYN) { 3692 /* The data space is one beyond snd_una */ 3693 rsm->r_start = seq_out + 1; 3694 rsm->r_end = rsm->r_start + (len - 1); 3695 } else { 3696 /* Normal case */ 3697 rsm->r_start = seq_out; 3698 rsm->r_end = rsm->r_start + len; 3699 } 3700 rsm->r_dupack = 0; 3701 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 3702 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 3703 #ifdef INVARIANTS 3704 if (insret != NULL) { 3705 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 3706 nrsm, insret, rack, rsm); 3707 } 3708 #endif 3709 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 3710 rsm->r_in_tmap = 1; 3711 return; 3712 } 3713 /* 3714 * If we reach here its a retransmission and we need to find it. 3715 */ 3716 memset(&fe, 0, sizeof(fe)); 3717 more: 3718 if (hintrsm && (hintrsm->r_start == seq_out)) { 3719 rsm = hintrsm; 3720 hintrsm = NULL; 3721 } else { 3722 /* No hints sorry */ 3723 rsm = NULL; 3724 } 3725 if ((rsm) && (rsm->r_start == seq_out)) { 3726 seq_out = rack_update_entry(tp, rack, rsm, ts, &len); 3727 if (len == 0) { 3728 return; 3729 } else { 3730 goto more; 3731 } 3732 } 3733 /* Ok it was not the last pointer go through it the hard way. */ 3734 refind: 3735 fe.r_start = seq_out; 3736 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 3737 if (rsm) { 3738 if (rsm->r_start == seq_out) { 3739 seq_out = rack_update_entry(tp, rack, rsm, ts, &len); 3740 if (len == 0) { 3741 return; 3742 } else { 3743 goto refind; 3744 } 3745 } 3746 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 3747 /* Transmitted within this piece */ 3748 /* 3749 * Ok we must split off the front and then let the 3750 * update do the rest 3751 */ 3752 nrsm = rack_alloc_full_limit(rack); 3753 if (nrsm == NULL) { 3754 rack_update_rsm(tp, rack, rsm, ts); 3755 return; 3756 } 3757 /* 3758 * copy rsm to nrsm and then trim the front of rsm 3759 * to not include this part. 
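 * For example, if rsm spans 1 - 11 and the retransmission started at
 * 6, nrsm becomes 6 - 11 and rsm shrinks to 1 - 6; rack_update_entry()
 * is then run on nrsm for the bytes actually resent.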
3760 */ 3761 rack_clone_rsm(rack, nrsm, rsm, seq_out); 3762 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 3763 #ifdef INVARIANTS 3764 if (insret != NULL) { 3765 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 3766 nrsm, insret, rack, rsm); 3767 } 3768 #endif 3769 if (rsm->r_in_tmap) { 3770 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 3771 nrsm->r_in_tmap = 1; 3772 } 3773 rsm->r_flags &= (~RACK_HAS_FIN); 3774 seq_out = rack_update_entry(tp, rack, nrsm, ts, &len); 3775 if (len == 0) { 3776 return; 3777 } 3778 } 3779 } 3780 /* 3781 * Hmm not found in map did they retransmit both old and on into the 3782 * new? 3783 */ 3784 if (seq_out == tp->snd_max) { 3785 goto again; 3786 } else if (SEQ_LT(seq_out, tp->snd_max)) { 3787 #ifdef INVARIANTS 3788 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 3789 seq_out, len, tp->snd_una, tp->snd_max); 3790 printf("Starting Dump of all rack entries\n"); 3791 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 3792 printf("rsm:%p start:%u end:%u\n", 3793 rsm, rsm->r_start, rsm->r_end); 3794 } 3795 printf("Dump complete\n"); 3796 panic("seq_out not found rack:%p tp:%p", 3797 rack, tp); 3798 #endif 3799 } else { 3800 #ifdef INVARIANTS 3801 /* 3802 * Hmm beyond sndmax? (only if we are using the new rtt-pack 3803 * flag) 3804 */ 3805 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 3806 seq_out, len, tp->snd_max, tp); 3807 #endif 3808 } 3809 } 3810 3811 /* 3812 * Record one of the RTT updates from an ack into 3813 * our sample structure. 3814 */ 3815 static void 3816 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt) 3817 { 3818 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 3819 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 3820 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 3821 } 3822 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 3823 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 3824 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 3825 } 3826 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 3827 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 3828 rack->r_ctl.rack_rs.rs_rtt_cnt++; 3829 } 3830 3831 /* 3832 * Collect new round-trip time estimate 3833 * and update averages and current timeout. 3834 */ 3835 static void 3836 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 3837 { 3838 int32_t delta; 3839 uint32_t o_srtt, o_var; 3840 int32_t rtt; 3841 3842 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 3843 /* No valid sample */ 3844 return; 3845 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 3846 /* We are to use the lowest RTT seen in a single ack */ 3847 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 3848 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 3849 /* We are to use the highest RTT seen in a single ack */ 3850 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 3851 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 3852 /* We are to use the average RTT seen in a single ack */ 3853 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 3854 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 3855 } else { 3856 #ifdef INVARIANTS 3857 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 3858 #endif 3859 return; 3860 } 3861 if (rtt == 0) 3862 rtt = 1; 3863 rack_log_rtt_sample(rack, rtt); 3864 o_srtt = tp->t_srtt; 3865 o_var = tp->t_rttvar; 3866 rack = (struct tcp_rack *)tp->t_fb_ptr; 3867 if (tp->t_srtt != 0) { 3868 /* 3869 * srtt is stored as fixed point with 5 bits after the 3870 * binary point (i.e., scaled by 8). 
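 * (With TCP_RTT_SHIFT at 5 the scale factor here is really 32; the
 * "scaled by 8" wording appears to predate the wider fraction.)
 * As a worked example of the update done just below: a stored
 * t_srtt of 3200 (100 ticks) and a new 120-tick sample give
 * delta = (119 << 2) - (3200 >> 3) = 76, so t_srtt becomes 3276,
 * i.e. 102.375 ticks -- the new sample weighted in at 1/8.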
The following magic is 3871 * equivalent to the smoothing algorithm in rfc793 with an 3872 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point). 3873 * Adjust rtt to origin 0. 3874 */ 3875 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3876 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3877 3878 tp->t_srtt += delta; 3879 if (tp->t_srtt <= 0) 3880 tp->t_srtt = 1; 3881 3882 /* 3883 * We accumulate a smoothed rtt variance (actually, a 3884 * smoothed mean difference), then set the retransmit timer 3885 * to smoothed rtt + 4 times the smoothed variance. rttvar 3886 * is stored as fixed point with 4 bits after the binary 3887 * point (scaled by 16). The following is equivalent to 3888 * rfc793 smoothing with an alpha of .75 (rttvar = 3889 * rttvar*3/4 + |delta| / 4). This replaces rfc793's 3890 * wired-in beta. 3891 */ 3892 if (delta < 0) 3893 delta = -delta; 3894 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3895 tp->t_rttvar += delta; 3896 if (tp->t_rttvar <= 0) 3897 tp->t_rttvar = 1; 3898 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3899 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3900 } else { 3901 /* 3902 * No rtt measurement yet - use the unsmoothed rtt. Set the 3903 * variance to half the rtt (so our first retransmit happens 3904 * at 3*rtt). 3905 */ 3906 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3907 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3908 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3909 } 3910 TCPSTAT_INC(tcps_rttupdated); 3911 rack_log_rtt_upd(tp, rack, rtt, o_srtt, o_var); 3912 tp->t_rttupdated++; 3913 #ifdef NETFLIX_STATS 3914 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 3915 #endif 3916 tp->t_rxtshift = 0; 3917 3918 /* 3919 * the retransmit should happen at rtt + 4 * rttvar. Because of the 3920 * way we do the smoothing, srtt and rttvar will each average +1/2 3921 * tick of bias. When we compute the retransmit timer, we want 1/2 3922 * tick of rounding and 1 extra tick because of +-1/2 tick 3923 * uncertainty in the firing of the timer. The bias will give us 3924 * exactly the 1.5 tick we need. But, because the bias is 3925 * statistical, we have to test that we don't drop below the minimum 3926 * feasible timer (which is 2 ticks). 3927 */ 3928 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3929 max(MSEC_2_TICKS(rack_rto_min), rtt + 2), MSEC_2_TICKS(rack_rto_max)); 3930 tp->t_softerror = 0; 3931 } 3932 3933 static void 3934 rack_earlier_retran(struct tcpcb *tp, struct rack_sendmap *rsm, 3935 uint32_t t, uint32_t cts) 3936 { 3937 /* 3938 * For this RSM, we acknowledged the data from a previous 3939 * transmission, not the last one we made. This means we did a false 3940 * retransmit. 3941 */ 3942 struct tcp_rack *rack; 3943 3944 if (rsm->r_flags & RACK_HAS_FIN) { 3945 /* 3946 * The sending of the FIN often is multiple sent when we 3947 * have everything outstanding ack'd. We ignore this case 3948 * since its over now. 3949 */ 3950 return; 3951 } 3952 if (rsm->r_flags & RACK_TLP) { 3953 /* 3954 * We expect TLP's to have this occur. 3955 */ 3956 return; 3957 } 3958 rack = (struct tcp_rack *)tp->t_fb_ptr; 3959 /* should we undo cc changes and exit recovery? 
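 * We only undo when the rsm that put us into recovery (matched via
 * rc_rsm_start) is the one that was falsely retransmitted; in that
 * case the cwnd and ssthresh captured at recovery entry are restored
 * if they are larger than the current values.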
*/ 3960 if (IN_RECOVERY(tp->t_flags)) { 3961 if (rack->r_ctl.rc_rsm_start == rsm->r_start) { 3962 /* 3963 * Undo what we ratched down and exit recovery if 3964 * possible 3965 */ 3966 EXIT_RECOVERY(tp->t_flags); 3967 tp->snd_recover = tp->snd_una; 3968 if (rack->r_ctl.rc_cwnd_at > tp->snd_cwnd) 3969 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at; 3970 if (rack->r_ctl.rc_ssthresh_at > tp->snd_ssthresh) 3971 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at; 3972 } 3973 } 3974 if (rsm->r_flags & RACK_WAS_SACKPASS) { 3975 /* 3976 * We retransmitted based on a sack and the earlier 3977 * retransmission ack'd it - re-ordering is occuring. 3978 */ 3979 counter_u64_add(rack_reorder_seen, 1); 3980 rack->r_ctl.rc_reorder_ts = cts; 3981 } 3982 counter_u64_add(rack_badfr, 1); 3983 counter_u64_add(rack_badfr_bytes, (rsm->r_end - rsm->r_start)); 3984 } 3985 3986 3987 static int 3988 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 3989 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type) 3990 { 3991 int32_t i; 3992 uint32_t t; 3993 3994 if (rsm->r_flags & RACK_ACKED) 3995 /* Already done */ 3996 return (0); 3997 3998 3999 if ((rsm->r_rtr_cnt == 1) || 4000 ((ack_type == CUM_ACKED) && 4001 (to->to_flags & TOF_TS) && 4002 (to->to_tsecr) && 4003 (rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1] == to->to_tsecr)) 4004 ) { 4005 /* 4006 * We will only find a matching timestamp if its cum-acked. 4007 * But if its only one retransmission its for-sure matching 4008 * :-) 4009 */ 4010 t = cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 4011 if ((int)t <= 0) 4012 t = 1; 4013 if (!tp->t_rttlow || tp->t_rttlow > t) 4014 tp->t_rttlow = t; 4015 if (!rack->r_ctl.rc_rack_min_rtt || 4016 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4017 rack->r_ctl.rc_rack_min_rtt = t; 4018 if (rack->r_ctl.rc_rack_min_rtt == 0) { 4019 rack->r_ctl.rc_rack_min_rtt = 1; 4020 } 4021 } 4022 tcp_rack_xmit_timer(rack, t + 1); 4023 if ((rsm->r_flags & RACK_TLP) && 4024 (!IN_RECOVERY(tp->t_flags))) { 4025 /* Segment was a TLP and our retrans matched */ 4026 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 4027 rack->r_ctl.rc_rsm_start = tp->snd_max; 4028 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 4029 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 4030 rack_cong_signal(tp, NULL, CC_NDUPACK); 4031 /* 4032 * When we enter recovery we need to assure 4033 * we send one packet. 4034 */ 4035 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4036 rack_log_to_prr(rack, 7); 4037 } 4038 } 4039 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 4040 /* New more recent rack_tmit_time */ 4041 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 4042 rack->rc_rack_rtt = t; 4043 } 4044 return (1); 4045 } 4046 /* 4047 * We clear the soft/rxtshift since we got an ack. 4048 * There is no assurance we will call the commit() function 4049 * so we need to clear these to avoid incorrect handling. 4050 */ 4051 tp->t_rxtshift = 0; 4052 tp->t_softerror = 0; 4053 if ((to->to_flags & TOF_TS) && 4054 (ack_type == CUM_ACKED) && 4055 (to->to_tsecr) && 4056 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 4057 /* 4058 * Now which timestamp does it match? In this block the ACK 4059 * must be coming from a previous transmission. 
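 * We walk the recorded send times looking for the one that equals
 * the echoed timestamp; if the match is not the most recent
 * transmission the retransmit was spurious and rack_earlier_retran()
 * gets to undo the damage.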
4060 */ 4061 for (i = 0; i < rsm->r_rtr_cnt; i++) { 4062 if (rsm->r_tim_lastsent[i] == to->to_tsecr) { 4063 t = cts - rsm->r_tim_lastsent[i]; 4064 if ((int)t <= 0) 4065 t = 1; 4066 if ((i + 1) < rsm->r_rtr_cnt) { 4067 /* Likely */ 4068 rack_earlier_retran(tp, rsm, t, cts); 4069 } 4070 if (!tp->t_rttlow || tp->t_rttlow > t) 4071 tp->t_rttlow = t; 4072 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4073 rack->r_ctl.rc_rack_min_rtt = t; 4074 if (rack->r_ctl.rc_rack_min_rtt == 0) { 4075 rack->r_ctl.rc_rack_min_rtt = 1; 4076 } 4077 } 4078 /* 4079 * Note the following calls to 4080 * tcp_rack_xmit_timer() are being commented 4081 * out for now. They give us no more accuracy 4082 * and often lead to a wrong choice. We have 4083 * enough samples that have not been 4084 * retransmitted. I leave the commented out 4085 * code in here in case in the future we 4086 * decide to add it back (though I can't forsee 4087 * doing that). That way we will easily see 4088 * where they need to be placed. 4089 */ 4090 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 4091 rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 4092 /* New more recent rack_tmit_time */ 4093 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 4094 rack->rc_rack_rtt = t; 4095 } 4096 return (1); 4097 } 4098 } 4099 goto ts_not_found; 4100 } else { 4101 /* 4102 * Ok its a SACK block that we retransmitted. or a windows 4103 * machine without timestamps. We can tell nothing from the 4104 * time-stamp since its not there or the time the peer last 4105 * recieved a segment that moved forward its cum-ack point. 4106 */ 4107 ts_not_found: 4108 i = rsm->r_rtr_cnt - 1; 4109 t = cts - rsm->r_tim_lastsent[i]; 4110 if ((int)t <= 0) 4111 t = 1; 4112 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4113 /* 4114 * We retransmitted and the ack came back in less 4115 * than the smallest rtt we have observed. We most 4116 * likey did an improper retransmit as outlined in 4117 * 4.2 Step 3 point 2 in the rack-draft. 4118 */ 4119 i = rsm->r_rtr_cnt - 2; 4120 t = cts - rsm->r_tim_lastsent[i]; 4121 rack_earlier_retran(tp, rsm, t, cts); 4122 } else if (rack->r_ctl.rc_rack_min_rtt) { 4123 /* 4124 * We retransmitted it and the retransmit did the 4125 * job. 4126 */ 4127 if (!rack->r_ctl.rc_rack_min_rtt || 4128 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 4129 rack->r_ctl.rc_rack_min_rtt = t; 4130 if (rack->r_ctl.rc_rack_min_rtt == 0) { 4131 rack->r_ctl.rc_rack_min_rtt = 1; 4132 } 4133 } 4134 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[i])) { 4135 /* New more recent rack_tmit_time */ 4136 rack->r_ctl.rc_rack_tmit_time = rsm->r_tim_lastsent[i]; 4137 rack->rc_rack_rtt = t; 4138 } 4139 return (1); 4140 } 4141 } 4142 return (0); 4143 } 4144 4145 /* 4146 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 4147 */ 4148 static void 4149 rack_log_sack_passed(struct tcpcb *tp, 4150 struct tcp_rack *rack, struct rack_sendmap *rsm) 4151 { 4152 struct rack_sendmap *nrsm; 4153 4154 nrsm = rsm; 4155 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 4156 rack_head, r_tnext) { 4157 if (nrsm == rsm) { 4158 /* Skip orginal segment he is acked */ 4159 continue; 4160 } 4161 if (nrsm->r_flags & RACK_ACKED) { 4162 /* 4163 * Skip ack'd segments, though we 4164 * should not see these, since tmap 4165 * should not have ack'd segments. 
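 * (Segments are pulled off the tmap when they are marked RACK_ACKED,
 * so hitting one here would mean the two structures are out of sync.)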
4166 */ 4167 continue; 4168 } 4169 if (nrsm->r_flags & RACK_SACK_PASSED) { 4170 /* 4171 * We found one that is already marked 4172 * passed, we have been here before and 4173 * so all others below this are marked. 4174 */ 4175 break; 4176 } 4177 nrsm->r_flags |= RACK_SACK_PASSED; 4178 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 4179 } 4180 } 4181 4182 static uint32_t 4183 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 4184 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 4185 { 4186 uint32_t start, end, changed = 0; 4187 struct rack_sendmap stack_map; 4188 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next; 4189 int32_t used_ref = 1; 4190 int moved = 0; 4191 4192 start = sack->start; 4193 end = sack->end; 4194 rsm = *prsm; 4195 memset(&fe, 0, sizeof(fe)); 4196 do_rest_ofb: 4197 if ((rsm == NULL) || 4198 (SEQ_LT(end, rsm->r_start)) || 4199 (SEQ_GEQ(start, rsm->r_end)) || 4200 (SEQ_LT(start, rsm->r_start))) { 4201 /* 4202 * We are not in the right spot, 4203 * find the correct spot in the tree. 4204 */ 4205 used_ref = 0; 4206 fe.r_start = start; 4207 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4208 moved++; 4209 } 4210 if (rsm == NULL) { 4211 /* TSNH */ 4212 goto out; 4213 } 4214 /* Ok we have an ACK for some piece of this rsm */ 4215 if (rsm->r_start != start) { 4216 if ((rsm->r_flags & RACK_ACKED) == 0) { 4217 /** 4218 * Need to split this in two pieces the before and after, 4219 * the before remains in the map, the after must be 4220 * added. In other words we have: 4221 * rsm |--------------| 4222 * sackblk |-------> 4223 * rsm will become 4224 * rsm |---| 4225 * and nrsm will be the sacked piece 4226 * nrsm |----------| 4227 * 4228 * But before we start down that path lets 4229 * see if the sack spans over on top of 4230 * the next guy and it is already sacked. 4231 */ 4232 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4233 if (next && (next->r_flags & RACK_ACKED) && 4234 SEQ_GEQ(end, next->r_start)) { 4235 /** 4236 * So the next one is already acked, and 4237 * we can thus by hookery use our stack_map 4238 * to reflect the piece being sacked and 4239 * then adjust the two tree entries moving 4240 * the start and ends around. So we start like: 4241 * rsm |------------| (not-acked) 4242 * next |-----------| (acked) 4243 * sackblk |--------> 4244 * We want to end like so: 4245 * rsm |------| (not-acked) 4246 * next |-----------------| (acked) 4247 * nrsm |-----| 4248 * Where nrsm is a temporary stack piece we 4249 * use to update all the gizmos. 4250 */ 4251 /* Copy up our fudge block */ 4252 nrsm = &stack_map; 4253 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 4254 /* Now adjust our tree blocks */ 4255 rsm->r_end = start; 4256 next->r_start = start; 4257 /* Clear out the dup ack count of the remainder */ 4258 rsm->r_dupack = 0; 4259 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 4260 /* Now lets make sure our fudge block is right */ 4261 nrsm->r_start = start; 4262 /* Now lets update all the stats and such */ 4263 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED); 4264 changed += (nrsm->r_end - nrsm->r_start); 4265 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 4266 if (nrsm->r_flags & RACK_SACK_PASSED) { 4267 counter_u64_add(rack_reorder_seen, 1); 4268 rack->r_ctl.rc_reorder_ts = cts; 4269 } 4270 /* 4271 * Now we want to go up from rsm (the 4272 * one left un-acked) to the next one 4273 * in the tmap. 
We do this so when 4274 * we walk backwards we include marking 4275 * sack-passed on rsm (The one passed in 4276 * is skipped since it is generally called 4277 * on something sacked before removing it 4278 * from the tmap). 4279 */ 4280 if (rsm->r_in_tmap) { 4281 nrsm = TAILQ_NEXT(rsm, r_tnext); 4282 /* 4283 * Now that we have the next 4284 * one walk backwards from there. 4285 */ 4286 if (nrsm && nrsm->r_in_tmap) 4287 rack_log_sack_passed(tp, rack, nrsm); 4288 } 4289 /* Now are we done? */ 4290 if (SEQ_LT(end, next->r_end) || 4291 (end == next->r_end)) { 4292 /* Done with block */ 4293 goto out; 4294 } 4295 counter_u64_add(rack_sack_used_next_merge, 1); 4296 /* Postion for the next block */ 4297 start = next->r_end; 4298 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 4299 if (rsm == NULL) 4300 goto out; 4301 } else { 4302 /** 4303 * We can't use any hookery here, so we 4304 * need to split the map. We enter like 4305 * so: 4306 * rsm |--------| 4307 * sackblk |-----> 4308 * We will add the new block nrsm and 4309 * that will be the new portion, and then 4310 * fall through after reseting rsm. So we 4311 * split and look like this: 4312 * rsm |----| 4313 * sackblk |-----> 4314 * nrsm |---| 4315 * We then fall through reseting 4316 * rsm to nrsm, so the next block 4317 * picks it up. 4318 */ 4319 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 4320 if (nrsm == NULL) { 4321 /* 4322 * failed XXXrrs what can we do but loose the sack 4323 * info? 4324 */ 4325 goto out; 4326 } 4327 counter_u64_add(rack_sack_splits, 1); 4328 rack_clone_rsm(rack, nrsm, rsm, start); 4329 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 4330 #ifdef INVARIANTS 4331 if (insret != NULL) { 4332 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 4333 nrsm, insret, rack, rsm); 4334 } 4335 #endif 4336 if (rsm->r_in_tmap) { 4337 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 4338 nrsm->r_in_tmap = 1; 4339 } 4340 rsm->r_flags &= (~RACK_HAS_FIN); 4341 /* Position us to point to the new nrsm that starts the sack blk */ 4342 rsm = nrsm; 4343 } 4344 } else { 4345 /* Already sacked this piece */ 4346 counter_u64_add(rack_sack_skipped_acked, 1); 4347 moved++; 4348 if (end == rsm->r_end) { 4349 /* Done with block */ 4350 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4351 goto out; 4352 } else if (SEQ_LT(end, rsm->r_end)) { 4353 /* A partial sack to a already sacked block */ 4354 moved++; 4355 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4356 goto out; 4357 } else { 4358 /* 4359 * The end goes beyond this guy 4360 * repostion the start to the 4361 * next block. 4362 */ 4363 start = rsm->r_end; 4364 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4365 if (rsm == NULL) 4366 goto out; 4367 } 4368 } 4369 } 4370 if (SEQ_GEQ(end, rsm->r_end)) { 4371 /** 4372 * The end of this block is either beyond this guy or right 4373 * at this guy. I.e.: 4374 * rsm --- |-----| 4375 * end |-----| 4376 * <or> 4377 * end |---------| 4378 */ 4379 if (rsm->r_flags & RACK_TLP) 4380 rack->r_ctl.rc_tlp_rtx_out = 0; 4381 if ((rsm->r_flags & RACK_ACKED) == 0) { 4382 rack_update_rtt(tp, rack, rsm, to, cts, SACKED); 4383 changed += (rsm->r_end - rsm->r_start); 4384 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 4385 if (rsm->r_in_tmap) /* should be true */ 4386 rack_log_sack_passed(tp, rack, rsm); 4387 /* Is Reordering occuring? 
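 * If this segment had already been passed over by a later SACK
 * (RACK_SACK_PASSED) and is only now being sacked, the peer got it
 * out of order, so count it and refresh rc_reorder_ts.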
*/ 4388 if (rsm->r_flags & RACK_SACK_PASSED) { 4389 rsm->r_flags &= ~RACK_SACK_PASSED; 4390 counter_u64_add(rack_reorder_seen, 1); 4391 rack->r_ctl.rc_reorder_ts = cts; 4392 } 4393 rsm->r_flags |= RACK_ACKED; 4394 rsm->r_flags &= ~RACK_TLP; 4395 if (rsm->r_in_tmap) { 4396 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4397 rsm->r_in_tmap = 0; 4398 } 4399 } else { 4400 counter_u64_add(rack_sack_skipped_acked, 1); 4401 moved++; 4402 } 4403 if (end == rsm->r_end) { 4404 /* This block only - done, setup for next */ 4405 goto out; 4406 } 4407 /* 4408 * There is more not coverend by this rsm move on 4409 * to the next block in the RB tree. 4410 */ 4411 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4412 start = rsm->r_end; 4413 rsm = nrsm; 4414 if (rsm == NULL) 4415 goto out; 4416 goto do_rest_ofb; 4417 } 4418 /** 4419 * The end of this sack block is smaller than 4420 * our rsm i.e.: 4421 * rsm --- |-----| 4422 * end |--| 4423 */ 4424 if ((rsm->r_flags & RACK_ACKED) == 0) { 4425 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4426 if (prev && (prev->r_flags & RACK_ACKED)) { 4427 /** 4428 * Goal, we want the right remainder of rsm to shrink 4429 * in place and span from (rsm->r_start = end) to rsm->r_end. 4430 * We want to expand prev to go all the way 4431 * to prev->r_end <- end. 4432 * so in the tree we have before: 4433 * prev |--------| (acked) 4434 * rsm |-------| (non-acked) 4435 * sackblk |-| 4436 * We churn it so we end up with 4437 * prev |----------| (acked) 4438 * rsm |-----| (non-acked) 4439 * nrsm |-| (temporary) 4440 */ 4441 nrsm = &stack_map; 4442 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 4443 prev->r_end = end; 4444 rsm->r_start = end; 4445 /* Now adjust nrsm (stack copy) to be 4446 * the one that is the small 4447 * piece that was "sacked". 4448 */ 4449 nrsm->r_end = end; 4450 rsm->r_dupack = 0; 4451 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 4452 /* 4453 * Now nrsm is our new little piece 4454 * that is acked (which was merged 4455 * to prev). Update the rtt and changed 4456 * based on that. Also check for reordering. 4457 */ 4458 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED); 4459 changed += (nrsm->r_end - nrsm->r_start); 4460 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 4461 if (nrsm->r_flags & RACK_SACK_PASSED) { 4462 counter_u64_add(rack_reorder_seen, 1); 4463 rack->r_ctl.rc_reorder_ts = cts; 4464 } 4465 rsm = prev; 4466 counter_u64_add(rack_sack_used_prev_merge, 1); 4467 } else { 4468 /** 4469 * This is the case where our previous 4470 * block is not acked either, so we must 4471 * split the block in two. 4472 */ 4473 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 4474 if (nrsm == NULL) { 4475 /* failed rrs what can we do but loose the sack info? */ 4476 goto out; 4477 } 4478 /** 4479 * In this case nrsm becomes 4480 * nrsm->r_start = end; 4481 * nrsm->r_end = rsm->r_end; 4482 * which is un-acked. 4483 * <and> 4484 * rsm->r_end = nrsm->r_start; 4485 * i.e. the remaining un-acked 4486 * piece is left on the left 4487 * hand side. 
4488 * 4489 * So we start like this 4490 * rsm |----------| (not acked) 4491 * sackblk |---| 4492 * build it so we have 4493 * rsm |---| (acked) 4494 * nrsm |------| (not acked) 4495 */ 4496 counter_u64_add(rack_sack_splits, 1); 4497 rack_clone_rsm(rack, nrsm, rsm, end); 4498 rsm->r_flags &= (~RACK_HAS_FIN); 4499 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 4500 #ifdef INVARIANTS 4501 if (insret != NULL) { 4502 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 4503 nrsm, insret, rack, rsm); 4504 } 4505 #endif 4506 if (rsm->r_in_tmap) { 4507 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 4508 nrsm->r_in_tmap = 1; 4509 } 4510 nrsm->r_dupack = 0; 4511 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 4512 if (rsm->r_flags & RACK_TLP) 4513 rack->r_ctl.rc_tlp_rtx_out = 0; 4514 rack_update_rtt(tp, rack, rsm, to, cts, SACKED); 4515 changed += (rsm->r_end - rsm->r_start); 4516 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 4517 if (rsm->r_in_tmap) /* should be true */ 4518 rack_log_sack_passed(tp, rack, rsm); 4519 /* Is Reordering occuring? */ 4520 if (rsm->r_flags & RACK_SACK_PASSED) { 4521 rsm->r_flags &= ~RACK_SACK_PASSED; 4522 counter_u64_add(rack_reorder_seen, 1); 4523 rack->r_ctl.rc_reorder_ts = cts; 4524 } 4525 rsm->r_flags |= RACK_ACKED; 4526 rsm->r_flags &= ~RACK_TLP; 4527 if (rsm->r_in_tmap) { 4528 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4529 rsm->r_in_tmap = 0; 4530 } 4531 } 4532 } else if (start != end){ 4533 /* 4534 * The block was already acked. 4535 */ 4536 counter_u64_add(rack_sack_skipped_acked, 1); 4537 moved++; 4538 } 4539 out: 4540 if (rsm && (rsm->r_flags & RACK_ACKED)) { 4541 /* 4542 * Now can we merge where we worked 4543 * with either the previous or 4544 * next block? 4545 */ 4546 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4547 while (next) { 4548 if (next->r_flags & RACK_ACKED) { 4549 /* yep this and next can be merged */ 4550 rsm = rack_merge_rsm(rack, rsm, next); 4551 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4552 } else 4553 break; 4554 } 4555 /* Now what about the previous? */ 4556 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4557 while (prev) { 4558 if (prev->r_flags & RACK_ACKED) { 4559 /* yep the previous and this can be merged */ 4560 rsm = rack_merge_rsm(rack, prev, rsm); 4561 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4562 } else 4563 break; 4564 } 4565 } 4566 if (used_ref == 0) { 4567 counter_u64_add(rack_sack_proc_all, 1); 4568 } else { 4569 counter_u64_add(rack_sack_proc_short, 1); 4570 } 4571 /* Save off the next one for quick reference. */ 4572 if (rsm) 4573 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4574 else 4575 nrsm = NULL; 4576 *prsm = rack->r_ctl.rc_sacklast = nrsm; 4577 /* Pass back the moved. 
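 * The moved count lets the caller feed sack_moved_extra/sack_count,
 * which the SACK-attack detection uses to judge how much hunting
 * around the tree this block forced on us.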
*/ 4578 *moved_two = moved; 4579 return (changed); 4580 } 4581 4582 static void inline 4583 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 4584 { 4585 struct rack_sendmap *tmap; 4586 4587 tmap = NULL; 4588 while (rsm && (rsm->r_flags & RACK_ACKED)) { 4589 /* Its no longer sacked, mark it so */ 4590 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 4591 #ifdef INVARIANTS 4592 if (rsm->r_in_tmap) { 4593 panic("rack:%p rsm:%p flags:0x%x in tmap?", 4594 rack, rsm, rsm->r_flags); 4595 } 4596 #endif 4597 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 4598 /* Rebuild it into our tmap */ 4599 if (tmap == NULL) { 4600 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4601 tmap = rsm; 4602 } else { 4603 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 4604 tmap = rsm; 4605 } 4606 tmap->r_in_tmap = 1; 4607 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4608 } 4609 /* 4610 * Now lets possibly clear the sack filter so we start 4611 * recognizing sacks that cover this area. 4612 */ 4613 if (rack_use_sack_filter) 4614 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 4615 4616 } 4617 4618 static void 4619 rack_do_decay(struct tcp_rack *rack) 4620 { 4621 struct timeval res; 4622 4623 #define timersub(tvp, uvp, vvp) \ 4624 do { \ 4625 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 4626 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 4627 if ((vvp)->tv_usec < 0) { \ 4628 (vvp)->tv_sec--; \ 4629 (vvp)->tv_usec += 1000000; \ 4630 } \ 4631 } while (0) 4632 4633 timersub(&rack->r_ctl.rc_last_ack, &rack->r_ctl.rc_last_time_decay, &res); 4634 #undef timersub 4635 4636 rack->r_ctl.input_pkt++; 4637 if ((rack->rc_in_persist) || 4638 (res.tv_sec >= 1) || 4639 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 4640 /* 4641 * Check for decay of non-SAD, 4642 * we want all SAD detection metrics to 4643 * decay 1/4 per second (or more) passed. 4644 */ 4645 uint32_t pkt_delta; 4646 4647 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 4648 /* Update our saved tracking values */ 4649 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 4650 rack->r_ctl.rc_last_time_decay = rack->r_ctl.rc_last_ack; 4651 /* Now do we escape without decay? */ 4652 if (rack->rc_in_persist || 4653 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 4654 (pkt_delta < tcp_sad_low_pps)){ 4655 /* 4656 * We don't decay idle connections 4657 * or ones that have a low input pps. 
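 * For the rest, ctf_decay_count() scales each counter down according
 * to tcp_sad_decay_val; e.g. at the intended quarter-per-second
 * decay an ack_count of 1000 comes back as roughly 750 on this pass.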
4658 */ 4659 return; 4660 } 4661 /* Decay the counters */ 4662 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 4663 tcp_sad_decay_val); 4664 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 4665 tcp_sad_decay_val); 4666 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 4667 tcp_sad_decay_val); 4668 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 4669 tcp_sad_decay_val); 4670 } 4671 } 4672 4673 static void 4674 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th) 4675 { 4676 uint32_t changed, entered_recovery = 0; 4677 struct tcp_rack *rack; 4678 struct rack_sendmap *rsm, *rm; 4679 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 4680 register uint32_t th_ack; 4681 int32_t i, j, k, num_sack_blks = 0; 4682 uint32_t cts, acked, ack_point, sack_changed = 0; 4683 int loop_start = 0, moved_two = 0; 4684 4685 INP_WLOCK_ASSERT(tp->t_inpcb); 4686 if (th->th_flags & TH_RST) { 4687 /* We don't log resets */ 4688 return; 4689 } 4690 rack = (struct tcp_rack *)tp->t_fb_ptr; 4691 cts = tcp_ts_getticks(); 4692 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 4693 changed = 0; 4694 th_ack = th->th_ack; 4695 if (rack->sack_attack_disable == 0) 4696 rack_do_decay(rack); 4697 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 4698 /* 4699 * You only get credit for 4700 * MSS and greater (and you get extra 4701 * credit for larger cum-ack moves). 4702 */ 4703 int ac; 4704 4705 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 4706 rack->r_ctl.ack_count += ac; 4707 counter_u64_add(rack_ack_total, ac); 4708 } 4709 if (rack->r_ctl.ack_count > 0xfff00000) { 4710 /* 4711 * reduce the number to keep us under 4712 * a uint32_t. 4713 */ 4714 rack->r_ctl.ack_count /= 2; 4715 rack->r_ctl.sack_count /= 2; 4716 } 4717 if (SEQ_GT(th_ack, tp->snd_una)) { 4718 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 4719 tp->t_acktime = ticks; 4720 } 4721 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 4722 changed = th_ack - rsm->r_start; 4723 if (changed) { 4724 /* 4725 * The ACK point is advancing to th_ack, we must drop off 4726 * the packets in the rack log and calculate any eligble 4727 * RTT's. 4728 */ 4729 rack->r_wanted_output++; 4730 more: 4731 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 4732 if (rsm == NULL) { 4733 if ((th_ack - 1) == tp->iss) { 4734 /* 4735 * For the SYN incoming case we will not 4736 * have called tcp_output for the sending of 4737 * the SYN, so there will be no map. All 4738 * other cases should probably be a panic. 4739 */ 4740 goto proc_sack; 4741 } 4742 if (tp->t_flags & TF_SENTFIN) { 4743 /* if we send a FIN we will not hav a map */ 4744 goto proc_sack; 4745 } 4746 #ifdef INVARIANTS 4747 panic("No rack map tp:%p for th:%p state:%d rack:%p snd_una:%u snd_max:%u snd_nxt:%u chg:%d\n", 4748 tp, 4749 th, tp->t_state, rack, 4750 tp->snd_una, tp->snd_max, tp->snd_nxt, changed); 4751 #endif 4752 goto proc_sack; 4753 } 4754 if (SEQ_LT(th_ack, rsm->r_start)) { 4755 /* Huh map is missing this */ 4756 #ifdef INVARIANTS 4757 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 4758 rsm->r_start, 4759 th_ack, tp->t_state, rack->r_state); 4760 #endif 4761 goto proc_sack; 4762 } 4763 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED); 4764 /* Now do we consume the whole thing? */ 4765 if (SEQ_GEQ(th_ack, rsm->r_end)) { 4766 /* Its all consumed. 
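 * The cum-ack covers the whole rsm: pull it from the tree and the
 * tmap, take its retransmitted bytes back out of rc_holes_rxt, and
 * if the ack reaches beyond r_end ("left" is non-zero) loop around
 * for the next rsm.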
*/ 4767 uint32_t left; 4768 4769 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 4770 rsm->r_rtr_bytes = 0; 4771 if (rsm->r_flags & RACK_TLP) 4772 rack->r_ctl.rc_tlp_rtx_out = 0; 4773 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 4774 #ifdef INVARIANTS 4775 if (rm != rsm) { 4776 panic("removing head in rack:%p rsm:%p rm:%p", 4777 rack, rsm, rm); 4778 } 4779 #endif 4780 if (rsm->r_in_tmap) { 4781 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 4782 rsm->r_in_tmap = 0; 4783 } 4784 if (rsm->r_flags & RACK_ACKED) { 4785 /* 4786 * It was acked on the scoreboard -- remove 4787 * it from total 4788 */ 4789 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 4790 } else if (rsm->r_flags & RACK_SACK_PASSED) { 4791 /* 4792 * There are segments ACKED on the 4793 * scoreboard further up. We are seeing 4794 * reordering. 4795 */ 4796 rsm->r_flags &= ~RACK_SACK_PASSED; 4797 counter_u64_add(rack_reorder_seen, 1); 4798 rsm->r_flags |= RACK_ACKED; 4799 rack->r_ctl.rc_reorder_ts = cts; 4800 } 4801 left = th_ack - rsm->r_end; 4802 if (rsm->r_rtr_cnt > 1) { 4803 /* 4804 * Technically we should make r_rtr_cnt be 4805 * monotonicly increasing and just mod it to 4806 * the timestamp it is replacing.. that way 4807 * we would have the last 3 retransmits. Now 4808 * rc_loss_count will be wrong if we 4809 * retransmit something more than 2 times in 4810 * recovery :( 4811 */ 4812 rack->r_ctl.rc_loss_count += (rsm->r_rtr_cnt - 1); 4813 } 4814 /* Free back to zone */ 4815 rack_free(rack, rsm); 4816 if (left) { 4817 goto more; 4818 } 4819 goto proc_sack; 4820 } 4821 if (rsm->r_flags & RACK_ACKED) { 4822 /* 4823 * It was acked on the scoreboard -- remove it from 4824 * total for the part being cum-acked. 4825 */ 4826 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 4827 } 4828 /* 4829 * Clear the dup ack count for 4830 * the piece that remains. 4831 */ 4832 rsm->r_dupack = 0; 4833 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 4834 if (rsm->r_rtr_bytes) { 4835 /* 4836 * It was retransmitted adjust the 4837 * sack holes for what was acked. 4838 */ 4839 int ack_am; 4840 4841 ack_am = (th_ack - rsm->r_start); 4842 if (ack_am >= rsm->r_rtr_bytes) { 4843 rack->r_ctl.rc_holes_rxt -= ack_am; 4844 rsm->r_rtr_bytes -= ack_am; 4845 } 4846 } 4847 /* Update where the piece starts */ 4848 rsm->r_start = th_ack; 4849 } 4850 proc_sack: 4851 /* Check for reneging */ 4852 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 4853 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 4854 /* 4855 * The peer has moved snd_una up to 4856 * the edge of this send, i.e. one 4857 * that it had previously acked. The only 4858 * way that can be true if the peer threw 4859 * away data (space issues) that it had 4860 * previously sacked (else it would have 4861 * given us snd_una up to (rsm->r_end). 4862 * We need to undo the acked markings here. 4863 * 4864 * Note we have to look to make sure th_ack is 4865 * our rsm->r_start in case we get an old ack 4866 * where th_ack is behind snd_una. 
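 * rack_peer_reneges() then strips RACK_ACKED from the affected rsms,
 * puts them back on the tmap so they are candidates for
 * retransmission again, and clears the sack filter from th_ack on.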
4867 */ 4868 rack_peer_reneges(rack, rsm, th->th_ack); 4869 } 4870 if ((to->to_flags & TOF_SACK) == 0) { 4871 /* We are done nothing left */ 4872 goto out; 4873 } 4874 /* Sack block processing */ 4875 if (SEQ_GT(th_ack, tp->snd_una)) 4876 ack_point = th_ack; 4877 else 4878 ack_point = tp->snd_una; 4879 for (i = 0; i < to->to_nsacks; i++) { 4880 bcopy((to->to_sacks + i * TCPOLEN_SACK), 4881 &sack, sizeof(sack)); 4882 sack.start = ntohl(sack.start); 4883 sack.end = ntohl(sack.end); 4884 if (SEQ_GT(sack.end, sack.start) && 4885 SEQ_GT(sack.start, ack_point) && 4886 SEQ_LT(sack.start, tp->snd_max) && 4887 SEQ_GT(sack.end, ack_point) && 4888 SEQ_LEQ(sack.end, tp->snd_max)) { 4889 sack_blocks[num_sack_blks] = sack; 4890 num_sack_blks++; 4891 #ifdef NETFLIX_STATS 4892 } else if (SEQ_LEQ(sack.start, th_ack) && 4893 SEQ_LEQ(sack.end, th_ack)) { 4894 /* 4895 * Its a D-SACK block. 4896 */ 4897 tcp_record_dsack(sack.start, sack.end); 4898 #endif 4899 } 4900 4901 } 4902 /* 4903 * Sort the SACK blocks so we can update the rack scoreboard with 4904 * just one pass. 4905 */ 4906 if (rack_use_sack_filter) { 4907 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 4908 num_sack_blks, th->th_ack); 4909 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 4910 } 4911 if (num_sack_blks == 0) { 4912 /* Nothing to sack (DSACKs?) */ 4913 goto out_with_totals; 4914 } 4915 if (num_sack_blks < 2) { 4916 /* Only one, we don't need to sort */ 4917 goto do_sack_work; 4918 } 4919 /* Sort the sacks */ 4920 for (i = 0; i < num_sack_blks; i++) { 4921 for (j = i + 1; j < num_sack_blks; j++) { 4922 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 4923 sack = sack_blocks[i]; 4924 sack_blocks[i] = sack_blocks[j]; 4925 sack_blocks[j] = sack; 4926 } 4927 } 4928 } 4929 /* 4930 * Now are any of the sack block ends the same (yes some 4931 * implementations send these)? 4932 */ 4933 again: 4934 if (num_sack_blks == 0) 4935 goto out_with_totals; 4936 if (num_sack_blks > 1) { 4937 for (i = 0; i < num_sack_blks; i++) { 4938 for (j = i + 1; j < num_sack_blks; j++) { 4939 if (sack_blocks[i].end == sack_blocks[j].end) { 4940 /* 4941 * Ok these two have the same end we 4942 * want the smallest end and then 4943 * throw away the larger and start 4944 * again. 4945 */ 4946 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 4947 /* 4948 * The second block covers 4949 * more area use that 4950 */ 4951 sack_blocks[i].start = sack_blocks[j].start; 4952 } 4953 /* 4954 * Now collapse out the dup-sack and 4955 * lower the count 4956 */ 4957 for (k = (j + 1); k < num_sack_blks; k++) { 4958 sack_blocks[j].start = sack_blocks[k].start; 4959 sack_blocks[j].end = sack_blocks[k].end; 4960 j++; 4961 } 4962 num_sack_blks--; 4963 goto again; 4964 } 4965 } 4966 } 4967 } 4968 do_sack_work: 4969 /* 4970 * First lets look to see if 4971 * we have retransmitted and 4972 * can use the transmit next? 4973 */ 4974 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 4975 if (rsm && 4976 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 4977 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 4978 /* 4979 * We probably did the FR and the next 4980 * SACK in continues as we would expect. 
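 * That is, the head of the tmap is the segment we just retransmitted
 * and the first sack block overlaps it, so we can process that block
 * straight from the tmap head instead of searching the tree.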
4981 */ 4982 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 4983 if (acked) { 4984 rack->r_wanted_output++; 4985 changed += acked; 4986 sack_changed += acked; 4987 } 4988 if (num_sack_blks == 1) { 4989 /* 4990 * This is what we would expect from 4991 * a normal implementation to happen 4992 * after we have retransmitted the FR, 4993 * i.e the sack-filter pushes down 4994 * to 1 block and the next to be retransmitted 4995 * is the sequence in the sack block (has more 4996 * are acked). Count this as ACK'd data to boost 4997 * up the chances of recovering any false positives. 4998 */ 4999 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 5000 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 5001 counter_u64_add(rack_express_sack, 1); 5002 if (rack->r_ctl.ack_count > 0xfff00000) { 5003 /* 5004 * reduce the number to keep us under 5005 * a uint32_t. 5006 */ 5007 rack->r_ctl.ack_count /= 2; 5008 rack->r_ctl.sack_count /= 2; 5009 } 5010 goto out_with_totals; 5011 } else { 5012 /* 5013 * Start the loop through the 5014 * rest of blocks, past the first block. 5015 */ 5016 moved_two = 0; 5017 loop_start = 1; 5018 } 5019 } 5020 /* Its a sack of some sort */ 5021 rack->r_ctl.sack_count++; 5022 if (rack->r_ctl.sack_count > 0xfff00000) { 5023 /* 5024 * reduce the number to keep us under 5025 * a uint32_t. 5026 */ 5027 rack->r_ctl.ack_count /= 2; 5028 rack->r_ctl.sack_count /= 2; 5029 } 5030 counter_u64_add(rack_sack_total, 1); 5031 if (rack->sack_attack_disable) { 5032 /* An attacker disablement is in place */ 5033 if (num_sack_blks > 1) { 5034 rack->r_ctl.sack_count += (num_sack_blks - 1); 5035 rack->r_ctl.sack_moved_extra++; 5036 counter_u64_add(rack_move_some, 1); 5037 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 5038 rack->r_ctl.sack_moved_extra /= 2; 5039 rack->r_ctl.sack_noextra_move /= 2; 5040 } 5041 } 5042 goto out; 5043 } 5044 rsm = rack->r_ctl.rc_sacklast; 5045 for (i = loop_start; i < num_sack_blks; i++) { 5046 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 5047 if (acked) { 5048 rack->r_wanted_output++; 5049 changed += acked; 5050 sack_changed += acked; 5051 } 5052 if (moved_two) { 5053 /* 5054 * If we did not get a SACK for at least a MSS and 5055 * had to move at all, or if we moved more than our 5056 * threshold, it counts against the "extra" move. 5057 */ 5058 rack->r_ctl.sack_moved_extra += moved_two; 5059 counter_u64_add(rack_move_some, 1); 5060 } else { 5061 /* 5062 * else we did not have to move 5063 * any more than we would expect. 5064 */ 5065 rack->r_ctl.sack_noextra_move++; 5066 counter_u64_add(rack_move_none, 1); 5067 } 5068 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 5069 /* 5070 * If the SACK was not a full MSS then 5071 * we add to sack_count the number of 5072 * MSS's (or possibly more than 5073 * a MSS if its a TSO send) we had to skip by. 5074 */ 5075 rack->r_ctl.sack_count += moved_two; 5076 counter_u64_add(rack_sack_total, moved_two); 5077 } 5078 /* 5079 * Now we need to setup for the next 5080 * round. First we make sure we won't 5081 * exceed the size of our uint32_t on 5082 * the various counts, and then clear out 5083 * moved_two. 
5084 */ 5085 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 5086 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 5087 rack->r_ctl.sack_moved_extra /= 2; 5088 rack->r_ctl.sack_noextra_move /= 2; 5089 } 5090 if (rack->r_ctl.sack_count > 0xfff00000) { 5091 rack->r_ctl.ack_count /= 2; 5092 rack->r_ctl.sack_count /= 2; 5093 } 5094 moved_two = 0; 5095 } 5096 out_with_totals: 5097 if (num_sack_blks > 1) { 5098 /* 5099 * You get an extra stroke if 5100 * you have more than one sack-blk, this 5101 * could be where we are skipping forward 5102 * and the sack-filter is still working, or 5103 * it could be an attacker constantly 5104 * moving us. 5105 */ 5106 rack->r_ctl.sack_moved_extra++; 5107 counter_u64_add(rack_move_some, 1); 5108 } 5109 out: 5110 #ifdef NETFLIX_EXP_DETECTION 5111 if ((rack->do_detection || tcp_force_detection) && 5112 tcp_sack_to_ack_thresh && 5113 tcp_sack_to_move_thresh && 5114 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 5115 /* 5116 * We have thresholds set to find 5117 * possible attackers and disable sack. 5118 * Check them. 5119 */ 5120 uint64_t ackratio, moveratio, movetotal; 5121 5122 /* Log detecting */ 5123 rack_log_sad(rack, 1); 5124 ackratio = (uint64_t)(rack->r_ctl.sack_count); 5125 ackratio *= (uint64_t)(1000); 5126 if (rack->r_ctl.ack_count) 5127 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 5128 else { 5129 /* We really should not hit here */ 5130 ackratio = 1000; 5131 } 5132 if ((rack->sack_attack_disable == 0) && 5133 (ackratio > rack_highest_sack_thresh_seen)) 5134 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 5135 movetotal = rack->r_ctl.sack_moved_extra; 5136 movetotal += rack->r_ctl.sack_noextra_move; 5137 moveratio = rack->r_ctl.sack_moved_extra; 5138 moveratio *= (uint64_t)1000; 5139 if (movetotal) 5140 moveratio /= movetotal; 5141 else { 5142 /* No moves, thats pretty good */ 5143 moveratio = 0; 5144 } 5145 if ((rack->sack_attack_disable == 0) && 5146 (moveratio > rack_highest_move_thresh_seen)) 5147 rack_highest_move_thresh_seen = (uint32_t)moveratio; 5148 if (rack->sack_attack_disable == 0) { 5149 if ((ackratio > tcp_sack_to_ack_thresh) && 5150 (moveratio > tcp_sack_to_move_thresh)) { 5151 /* Disable sack processing */ 5152 rack->sack_attack_disable = 1; 5153 if (rack->r_rep_attack == 0) { 5154 rack->r_rep_attack = 1; 5155 counter_u64_add(rack_sack_attacks_detected, 1); 5156 } 5157 if (tcp_attack_on_turns_on_logging) { 5158 /* 5159 * Turn on logging, used for debugging 5160 * false positives. 
5161 */ 5162 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 5163 } 5164 /* Clamp the cwnd at flight size */ 5165 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 5166 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 5167 rack_log_sad(rack, 2); 5168 } 5169 } else { 5170 /* We are sack-disabled check for false positives */ 5171 if ((ackratio <= tcp_restoral_thresh) || 5172 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 5173 rack->sack_attack_disable = 0; 5174 rack_log_sad(rack, 3); 5175 /* Restart counting */ 5176 rack->r_ctl.sack_count = 0; 5177 rack->r_ctl.sack_moved_extra = 0; 5178 rack->r_ctl.sack_noextra_move = 1; 5179 rack->r_ctl.ack_count = max(1, 5180 (BYTES_THIS_ACK(tp, th)/ctf_fixed_maxseg(rack->rc_tp))); 5181 5182 if (rack->r_rep_reverse == 0) { 5183 rack->r_rep_reverse = 1; 5184 counter_u64_add(rack_sack_attacks_reversed, 1); 5185 } 5186 /* Restore the cwnd */ 5187 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 5188 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 5189 } 5190 } 5191 } 5192 #endif 5193 if (changed) { 5194 /* Something changed cancel the rack timer */ 5195 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5196 } 5197 if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) { 5198 /* 5199 * Ok we have a high probability that we need to go in to 5200 * recovery since we have data sack'd 5201 */ 5202 struct rack_sendmap *rsm; 5203 uint32_t tsused; 5204 5205 tsused = tcp_ts_getticks(); 5206 rsm = tcp_rack_output(tp, rack, tsused); 5207 if (rsm) { 5208 /* Enter recovery */ 5209 rack->r_ctl.rc_rsm_start = rsm->r_start; 5210 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 5211 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 5212 entered_recovery = 1; 5213 rack_cong_signal(tp, NULL, CC_NDUPACK); 5214 /* 5215 * When we enter recovery we need to assure we send 5216 * one packet. 
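 * We do that by seeding the PRR send count with one
 * fixed-size segment of credit and setting the timer
 * override so the output path runs right away. Later
 * ACKs replenish the credit through the PRR calculation
 * further down in this function, which is roughly
 *	sndcnt = (prr_delivered * ssthresh) / recovery_fs - prr_out
 * while the pipe is still above ssthresh.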
5217 */ 5218 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 5219 rack_log_to_prr(rack, 8); 5220 rack->r_timer_override = 1; 5221 } 5222 } 5223 if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) { 5224 /* Deal with changed and PRR here (in recovery only) */ 5225 uint32_t pipe, snd_una; 5226 5227 rack->r_ctl.rc_prr_delivered += changed; 5228 /* Compute prr_sndcnt */ 5229 if (SEQ_GT(tp->snd_una, th_ack)) { 5230 snd_una = tp->snd_una; 5231 } else { 5232 snd_una = th_ack; 5233 } 5234 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 5235 if (pipe > tp->snd_ssthresh) { 5236 long sndcnt; 5237 5238 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 5239 if (rack->r_ctl.rc_prr_recovery_fs > 0) 5240 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 5241 else { 5242 rack->r_ctl.rc_prr_sndcnt = 0; 5243 rack_log_to_prr(rack, 9); 5244 sndcnt = 0; 5245 } 5246 sndcnt++; 5247 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 5248 sndcnt -= rack->r_ctl.rc_prr_out; 5249 else 5250 sndcnt = 0; 5251 rack->r_ctl.rc_prr_sndcnt = sndcnt; 5252 rack_log_to_prr(rack, 10); 5253 } else { 5254 uint32_t limit; 5255 5256 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 5257 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 5258 else 5259 limit = 0; 5260 if (changed > limit) 5261 limit = changed; 5262 limit += ctf_fixed_maxseg(tp); 5263 if (tp->snd_ssthresh > pipe) { 5264 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 5265 rack_log_to_prr(rack, 11); 5266 } else { 5267 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 5268 rack_log_to_prr(rack, 12); 5269 } 5270 } 5271 if (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) { 5272 rack->r_timer_override = 1; 5273 } 5274 } 5275 } 5276 5277 static void 5278 rack_strike_dupack(struct tcp_rack *rack) 5279 { 5280 struct rack_sendmap *rsm; 5281 5282 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5283 if (rsm && (rsm->r_dupack < 0xff)) { 5284 rsm->r_dupack++; 5285 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 5286 rack->r_wanted_output = 1; 5287 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 5288 } else { 5289 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 5290 } 5291 } 5292 } 5293 5294 /* 5295 * Return value of 1, we do not need to call rack_process_data(). 5296 * return value of 0, rack_process_data can be called. 5297 * For ret_val if its 0 the TCP is locked, if its non-zero 5298 * its unlocked and probably unsafe to touch the TCB. 5299 */ 5300 static int 5301 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 5302 struct tcpcb *tp, struct tcpopt *to, 5303 uint32_t tiwin, int32_t tlen, 5304 int32_t * ofia, int32_t thflags, int32_t * ret_val) 5305 { 5306 int32_t ourfinisacked = 0; 5307 int32_t nsegs, acked_amount; 5308 int32_t acked; 5309 struct mbuf *mfree; 5310 struct tcp_rack *rack; 5311 int32_t recovery = 0; 5312 5313 rack = (struct tcp_rack *)tp->t_fb_ptr; 5314 if (SEQ_GT(th->th_ack, tp->snd_max)) { 5315 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 5316 rack->r_wanted_output++; 5317 return (1); 5318 } 5319 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 5320 if (rack->rc_in_persist) 5321 tp->t_rxtshift = 0; 5322 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd)) 5323 rack_strike_dupack(rack); 5324 rack_log_ack(tp, to, th); 5325 } 5326 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 5327 /* 5328 * Old ack, behind (or duplicate to) the last one rcv'd 5329 * Note: Should mark reordering is occuring! 
We should also 5330 * look for sack blocks arriving e.g. ack 1, 4-4 then ack 1, 5331 * 3-3, 4-4 would be reording. As well as ack 1, 3-3 <no 5332 * retran and> ack 3 5333 */ 5334 return (0); 5335 } 5336 /* 5337 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 5338 * something we sent. 5339 */ 5340 if (tp->t_flags & TF_NEEDSYN) { 5341 /* 5342 * T/TCP: Connection was half-synchronized, and our SYN has 5343 * been ACK'd (so connection is now fully synchronized). Go 5344 * to non-starred state, increment snd_una for ACK of SYN, 5345 * and check if we can do window scaling. 5346 */ 5347 tp->t_flags &= ~TF_NEEDSYN; 5348 tp->snd_una++; 5349 /* Do window scaling? */ 5350 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 5351 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 5352 tp->rcv_scale = tp->request_r_scale; 5353 /* Send window already scaled. */ 5354 } 5355 } 5356 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5357 INP_WLOCK_ASSERT(tp->t_inpcb); 5358 5359 acked = BYTES_THIS_ACK(tp, th); 5360 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 5361 TCPSTAT_ADD(tcps_rcvackbyte, acked); 5362 5363 /* 5364 * If we just performed our first retransmit, and the ACK arrives 5365 * within our recovery window, then it was a mistake to do the 5366 * retransmit in the first place. Recover our original cwnd and 5367 * ssthresh, and proceed to transmit where we left off. 5368 */ 5369 if (tp->t_flags & TF_PREVVALID) { 5370 tp->t_flags &= ~TF_PREVVALID; 5371 if (tp->t_rxtshift == 1 && 5372 (int)(ticks - tp->t_badrxtwin) < 0) 5373 rack_cong_signal(tp, th, CC_RTO_ERR); 5374 } 5375 /* 5376 * If we have a timestamp reply, update smoothed round trip time. If 5377 * no timestamp is present but transmit timer is running and timed 5378 * sequence number was acked, update smoothed round trip time. Since 5379 * we now have an rtt measurement, cancel the timer backoff (cf., 5380 * Phil Karn's retransmit alg.). Recompute the initial retransmit 5381 * timer. 5382 * 5383 * Some boxes send broken timestamp replies during the SYN+ACK 5384 * phase, ignore timestamps of 0 or we could calculate a huge RTT 5385 * and blow up the retransmit timer. 5386 */ 5387 /* 5388 * If all outstanding data is acked, stop retransmit timer and 5389 * remember to restart (more output or persist). If there is more 5390 * data to be acked, restart retransmit timer, using current 5391 * (possibly backed-off) value. 5392 */ 5393 if (th->th_ack == tp->snd_max) { 5394 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5395 rack->r_wanted_output++; 5396 } 5397 if (acked == 0) { 5398 if (ofia) 5399 *ofia = ourfinisacked; 5400 return (0); 5401 } 5402 if (rack->r_ctl.rc_early_recovery) { 5403 if (IN_RECOVERY(tp->t_flags)) { 5404 if (SEQ_LT(th->th_ack, tp->snd_recover) && 5405 (SEQ_LT(th->th_ack, tp->snd_max))) { 5406 tcp_rack_partialack(tp, th); 5407 } else { 5408 rack_post_recovery(tp, th); 5409 recovery = 1; 5410 } 5411 } 5412 } 5413 /* 5414 * Let the congestion control algorithm update congestion control 5415 * related information. This typically means increasing the 5416 * congestion window. 5417 */ 5418 rack_ack_received(tp, rack, th, nsegs, CC_ACK, recovery); 5419 SOCKBUF_LOCK(&so->so_snd); 5420 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 5421 tp->snd_wnd -= acked_amount; 5422 mfree = sbcut_locked(&so->so_snd, acked_amount); 5423 if ((sbused(&so->so_snd) == 0) && 5424 (acked > acked_amount) && 5425 (tp->t_state >= TCPS_FIN_WAIT_1)) { 5426 ourfinisacked = 1; 5427 } 5428 /* NB: sowwakeup_locked() does an implicit unlock. 
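 * In other words it releases the send-buffer lock taken
 * above, so the mbufs we trimmed out of the socket buffer
 * can be freed just below without the lock held.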
*/ 5429 sowwakeup_locked(so); 5430 m_freem(mfree); 5431 if (rack->r_ctl.rc_early_recovery == 0) { 5432 if (IN_RECOVERY(tp->t_flags)) { 5433 if (SEQ_LT(th->th_ack, tp->snd_recover) && 5434 (SEQ_LT(th->th_ack, tp->snd_max))) { 5435 tcp_rack_partialack(tp, th); 5436 } else { 5437 rack_post_recovery(tp, th); 5438 } 5439 } 5440 } 5441 tp->snd_una = th->th_ack; 5442 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 5443 tp->snd_recover = tp->snd_una; 5444 5445 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 5446 tp->snd_nxt = tp->snd_una; 5447 } 5448 if (tp->snd_una == tp->snd_max) { 5449 /* Nothing left outstanding */ 5450 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 5451 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 5452 tp->t_acktime = 0; 5453 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 5454 /* Set need output so persist might get set */ 5455 rack->r_wanted_output++; 5456 if (rack_use_sack_filter) 5457 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 5458 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 5459 (sbavail(&so->so_snd) == 0) && 5460 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 5461 /* 5462 * The socket was gone and the 5463 * peer sent data, time to 5464 * reset him. 5465 */ 5466 *ret_val = 1; 5467 tp = tcp_close(tp); 5468 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 5469 return (1); 5470 } 5471 } 5472 if (ofia) 5473 *ofia = ourfinisacked; 5474 return (0); 5475 } 5476 5477 static void 5478 rack_collapsed_window(struct tcp_rack *rack) 5479 { 5480 /* 5481 * Now we must walk the 5482 * send map and divide the 5483 * ones left stranded. These 5484 * guys can't cause us to abort 5485 * the connection and are really 5486 * "unsent". However, if a buggy 5487 * client actually did keep some 5488 * of the data, i.e. collapsed the win, 5489 * refused to ack, and then opened 5490 * the win and acked that data, we would 5491 * get into an ack war, so the simpler 5492 * method of just pretending we 5493 * did not send those segments 5494 * won't work. 5495 */ 5496 struct rack_sendmap *rsm, *nrsm, fe, *insret; 5497 tcp_seq max_seq; 5498 uint32_t maxseg; 5499 5500 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 5501 maxseg = ctf_fixed_maxseg(rack->rc_tp); 5502 memset(&fe, 0, sizeof(fe)); 5503 fe.r_start = max_seq; 5504 /* Find the first seq past or at max_seq */ 5505 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 5506 if (rsm == NULL) { 5507 /* Nothing to do, strange */ 5508 rack->rc_has_collapsed = 0; 5509 return; 5510 } 5511 /* 5512 * Now do we need to split at 5513 * the collapse point? 5514 */ 5515 if (SEQ_GT(max_seq, rsm->r_start)) { 5516 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 5517 if (nrsm == NULL) { 5518 /* We can't get a rsm, mark all?
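 * If the allocation fails we give up on splitting and
 * simply start marking from the existing entry, which may
 * also flag a little data that is still inside the peer's
 * window; that is the conservative choice rather than
 * losing track of the stranded range.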
*/ 5519 nrsm = rsm; 5520 goto no_split; 5521 } 5522 /* Clone it */ 5523 rack_clone_rsm(rack, nrsm, rsm, max_seq); 5524 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 5525 #ifdef INVARIANTS 5526 if (insret != NULL) { 5527 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 5528 nrsm, insret, rack, rsm); 5529 } 5530 #endif 5531 if (rsm->r_in_tmap) { 5532 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 5533 nrsm->r_in_tmap = 1; 5534 } 5535 /* 5536 * Set in the new RSM as the 5537 * collapsed starting point 5538 */ 5539 rsm = nrsm; 5540 } 5541 no_split: 5542 counter_u64_add(rack_collapsed_win, 1); 5543 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 5544 nrsm->r_flags |= RACK_RWND_COLLAPSED; 5545 rack->rc_has_collapsed = 1; 5546 } 5547 } 5548 5549 static void 5550 rack_un_collapse_window(struct tcp_rack *rack) 5551 { 5552 struct rack_sendmap *rsm; 5553 5554 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 5555 if (rsm->r_flags & RACK_RWND_COLLAPSED) 5556 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 5557 else 5558 break; 5559 } 5560 rack->rc_has_collapsed = 0; 5561 } 5562 5563 /* 5564 * Return value of 1, the TCB is unlocked and most 5565 * likely gone, return value of 0, the TCP is still 5566 * locked. 5567 */ 5568 static int 5569 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 5570 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 5571 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 5572 { 5573 /* 5574 * Update window information. Don't look at window if no ACK: TAC's 5575 * send garbage on first SYN. 5576 */ 5577 int32_t nsegs; 5578 int32_t tfo_syn; 5579 struct tcp_rack *rack; 5580 5581 rack = (struct tcp_rack *)tp->t_fb_ptr; 5582 INP_WLOCK_ASSERT(tp->t_inpcb); 5583 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5584 if ((thflags & TH_ACK) && 5585 (SEQ_LT(tp->snd_wl1, th->th_seq) || 5586 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 5587 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 5588 /* keep track of pure window updates */ 5589 if (tlen == 0 && 5590 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 5591 TCPSTAT_INC(tcps_rcvwinupd); 5592 tp->snd_wnd = tiwin; 5593 tp->snd_wl1 = th->th_seq; 5594 tp->snd_wl2 = th->th_ack; 5595 if (tp->snd_wnd > tp->max_sndwnd) 5596 tp->max_sndwnd = tp->snd_wnd; 5597 rack->r_wanted_output++; 5598 } else if (thflags & TH_ACK) { 5599 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 5600 tp->snd_wnd = tiwin; 5601 tp->snd_wl1 = th->th_seq; 5602 tp->snd_wl2 = th->th_ack; 5603 } 5604 } 5605 if (tp->snd_wnd < ctf_outstanding(tp)) 5606 /* The peer collapsed the window */ 5607 rack_collapsed_window(rack); 5608 else if (rack->rc_has_collapsed) 5609 rack_un_collapse_window(rack); 5610 /* Was persist timer active and now we have window space? */ 5611 if ((rack->rc_in_persist != 0) && 5612 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 5613 rack->r_ctl.rc_pace_min_segs))) { 5614 rack_exit_persist(tp, rack); 5615 tp->snd_nxt = tp->snd_max; 5616 /* Make sure we output to start the timer */ 5617 rack->r_wanted_output++; 5618 } 5619 /* Do we enter persists? 
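 * Only when the offered window has shrunk below the
 * smaller of half the largest window we have seen and our
 * pacing minimum, the connection is established, nothing
 * is outstanding, and we still have more data queued than
 * the peer is currently willing to take.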
*/ 5620 if ((rack->rc_in_persist == 0) && 5621 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 5622 TCPS_HAVEESTABLISHED(tp->t_state) && 5623 (tp->snd_max == tp->snd_una) && 5624 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 5625 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 5626 /* 5627 * Here the rwnd is less than 5628 * the pacing size, we are established, 5629 * nothing is outstanding, and there is 5630 * data to send. Enter persists. 5631 */ 5632 tp->snd_nxt = tp->snd_una; 5633 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 5634 } 5635 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 5636 m_freem(m); 5637 return (0); 5638 } 5639 /* 5640 * Process segments with URG. 5641 */ 5642 if ((thflags & TH_URG) && th->th_urp && 5643 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 5644 /* 5645 * This is a kludge, but if we receive and accept random 5646 * urgent pointers, we'll crash in soreceive. It's hard to 5647 * imagine someone actually wanting to send this much urgent 5648 * data. 5649 */ 5650 SOCKBUF_LOCK(&so->so_rcv); 5651 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 5652 th->th_urp = 0; /* XXX */ 5653 thflags &= ~TH_URG; /* XXX */ 5654 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 5655 goto dodata; /* XXX */ 5656 } 5657 /* 5658 * If this segment advances the known urgent pointer, then 5659 * mark the data stream. This should not happen in 5660 * CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since a 5661 * FIN has been received from the remote side. In these 5662 * states we ignore the URG. 5663 * 5664 * According to RFC961 (Assigned Protocols), the urgent 5665 * pointer points to the last octet of urgent data. We 5666 * continue, however, to consider it to indicate the first 5667 * octet of data past the urgent section as the original 5668 * spec states (in one of two places). 5669 */ 5670 if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { 5671 tp->rcv_up = th->th_seq + th->th_urp; 5672 so->so_oobmark = sbavail(&so->so_rcv) + 5673 (tp->rcv_up - tp->rcv_nxt) - 1; 5674 if (so->so_oobmark == 0) 5675 so->so_rcv.sb_state |= SBS_RCVATMARK; 5676 sohasoutofband(so); 5677 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 5678 } 5679 SOCKBUF_UNLOCK(&so->so_rcv); 5680 /* 5681 * Remove out of band data so doesn't get presented to user. 5682 * This can happen independent of advancing the URG pointer, 5683 * but if two URG's are pending at once, some out-of-band 5684 * data may creep in... ick. 5685 */ 5686 if (th->th_urp <= (uint32_t) tlen && 5687 !(so->so_options & SO_OOBINLINE)) { 5688 /* hdr drop is delayed */ 5689 tcp_pulloutofband(so, th, m, drop_hdrlen); 5690 } 5691 } else { 5692 /* 5693 * If no out of band data is expected, pull receive urgent 5694 * pointer along with the receive window. 5695 */ 5696 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 5697 tp->rcv_up = tp->rcv_nxt; 5698 } 5699 dodata: /* XXX */ 5700 INP_WLOCK_ASSERT(tp->t_inpcb); 5701 5702 /* 5703 * Process the segment text, merging it into the TCP sequencing 5704 * queue, and arranging for acknowledgment of receipt if necessary. 5705 * This process logically involves adjusting tp->rcv_wnd as data is 5706 * presented to the user (this happens in tcp_usrreq.c, case 5707 * PRU_RCVD). If a FIN has already been received on this connection 5708 * then we just ignore the text. 
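 * The common case (this is exactly the next segment
 * expected and the reassembly queue is empty) is appended
 * straight to the receive buffer below; anything else is
 * handed to tcp_reass() and acked immediately so the
 * peer's fast retransmit can do its job.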
5709 */ 5710 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 5711 IS_FASTOPEN(tp->t_flags)); 5712 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 5713 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 5714 tcp_seq save_start = th->th_seq; 5715 tcp_seq save_rnxt = tp->rcv_nxt; 5716 int save_tlen = tlen; 5717 5718 m_adj(m, drop_hdrlen); /* delayed header drop */ 5719 /* 5720 * Insert segment which includes th into TCP reassembly 5721 * queue with control block tp. Set thflags to whether 5722 * reassembly now includes a segment with FIN. This handles 5723 * the common case inline (segment is the next to be 5724 * received on an established connection, and the queue is 5725 * empty), avoiding linkage into and removal from the queue 5726 * and repetition of various conversions. Set DELACK for 5727 * segments received in order, but ack immediately when 5728 * segments are out of order (so fast retransmit can work). 5729 */ 5730 if (th->th_seq == tp->rcv_nxt && 5731 SEGQ_EMPTY(tp) && 5732 (TCPS_HAVEESTABLISHED(tp->t_state) || 5733 tfo_syn)) { 5734 #ifdef NETFLIX_SB_LIMITS 5735 u_int mcnt, appended; 5736 5737 if (so->so_rcv.sb_shlim) { 5738 mcnt = m_memcnt(m); 5739 appended = 0; 5740 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 5741 CFO_NOSLEEP, NULL) == false) { 5742 counter_u64_add(tcp_sb_shlim_fails, 1); 5743 m_freem(m); 5744 return (0); 5745 } 5746 } 5747 #endif 5748 if (DELAY_ACK(tp, tlen) || tfo_syn) { 5749 rack_timer_cancel(tp, rack, 5750 rack->r_ctl.rc_rcvtime, __LINE__); 5751 tp->t_flags |= TF_DELACK; 5752 } else { 5753 rack->r_wanted_output++; 5754 tp->t_flags |= TF_ACKNOW; 5755 } 5756 tp->rcv_nxt += tlen; 5757 thflags = th->th_flags & TH_FIN; 5758 TCPSTAT_ADD(tcps_rcvpack, nsegs); 5759 TCPSTAT_ADD(tcps_rcvbyte, tlen); 5760 SOCKBUF_LOCK(&so->so_rcv); 5761 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5762 m_freem(m); 5763 } else 5764 #ifdef NETFLIX_SB_LIMITS 5765 appended = 5766 #endif 5767 sbappendstream_locked(&so->so_rcv, m, 0); 5768 /* NB: sorwakeup_locked() does an implicit unlock. */ 5769 sorwakeup_locked(so); 5770 #ifdef NETFLIX_SB_LIMITS 5771 if (so->so_rcv.sb_shlim && appended != mcnt) 5772 counter_fo_release(so->so_rcv.sb_shlim, 5773 mcnt - appended); 5774 #endif 5775 } else { 5776 /* 5777 * XXX: Due to the header drop above "th" is 5778 * theoretically invalid by now. Fortunately 5779 * m_adj() doesn't actually frees any mbufs when 5780 * trimming from the head. 5781 */ 5782 tcp_seq temp = save_start; 5783 thflags = tcp_reass(tp, th, &temp, &tlen, m); 5784 tp->t_flags |= TF_ACKNOW; 5785 } 5786 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) { 5787 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 5788 /* 5789 * DSACK actually handled in the fastpath 5790 * above. 5791 */ 5792 tcp_update_sack_list(tp, save_start, 5793 save_start + save_tlen); 5794 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 5795 if ((tp->rcv_numsacks >= 1) && 5796 (tp->sackblks[0].end == save_start)) { 5797 /* 5798 * Partial overlap, recorded at todrop 5799 * above. 5800 */ 5801 tcp_update_sack_list(tp, 5802 tp->sackblks[0].start, 5803 tp->sackblks[0].end); 5804 } else { 5805 tcp_update_dsack_list(tp, save_start, 5806 save_start + save_tlen); 5807 } 5808 } else if (tlen >= save_tlen) { 5809 /* Update of sackblks. 
*/ 5810 tcp_update_dsack_list(tp, save_start, 5811 save_start + save_tlen); 5812 } else if (tlen > 0) { 5813 tcp_update_dsack_list(tp, save_start, 5814 save_start + tlen); 5815 } 5816 } 5817 } else { 5818 m_freem(m); 5819 thflags &= ~TH_FIN; 5820 } 5821 5822 /* 5823 * If FIN is received ACK the FIN and let the user know that the 5824 * connection is closing. 5825 */ 5826 if (thflags & TH_FIN) { 5827 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 5828 socantrcvmore(so); 5829 /* 5830 * If connection is half-synchronized (ie NEEDSYN 5831 * flag on) then delay ACK, so it may be piggybacked 5832 * when SYN is sent. Otherwise, since we received a 5833 * FIN then no more input can be expected, send ACK 5834 * now. 5835 */ 5836 if (tp->t_flags & TF_NEEDSYN) { 5837 rack_timer_cancel(tp, rack, 5838 rack->r_ctl.rc_rcvtime, __LINE__); 5839 tp->t_flags |= TF_DELACK; 5840 } else { 5841 tp->t_flags |= TF_ACKNOW; 5842 } 5843 tp->rcv_nxt++; 5844 } 5845 switch (tp->t_state) { 5846 5847 /* 5848 * In SYN_RECEIVED and ESTABLISHED STATES enter the 5849 * CLOSE_WAIT state. 5850 */ 5851 case TCPS_SYN_RECEIVED: 5852 tp->t_starttime = ticks; 5853 /* FALLTHROUGH */ 5854 case TCPS_ESTABLISHED: 5855 rack_timer_cancel(tp, rack, 5856 rack->r_ctl.rc_rcvtime, __LINE__); 5857 tcp_state_change(tp, TCPS_CLOSE_WAIT); 5858 break; 5859 5860 /* 5861 * If still in FIN_WAIT_1 STATE FIN has not been 5862 * acked so enter the CLOSING state. 5863 */ 5864 case TCPS_FIN_WAIT_1: 5865 rack_timer_cancel(tp, rack, 5866 rack->r_ctl.rc_rcvtime, __LINE__); 5867 tcp_state_change(tp, TCPS_CLOSING); 5868 break; 5869 5870 /* 5871 * In FIN_WAIT_2 state enter the TIME_WAIT state, 5872 * starting the time-wait timer, turning off the 5873 * other standard timers. 5874 */ 5875 case TCPS_FIN_WAIT_2: 5876 rack_timer_cancel(tp, rack, 5877 rack->r_ctl.rc_rcvtime, __LINE__); 5878 tcp_twstart(tp); 5879 return (1); 5880 } 5881 } 5882 /* 5883 * Return any desired output. 5884 */ 5885 if ((tp->t_flags & TF_ACKNOW) || 5886 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 5887 rack->r_wanted_output++; 5888 } 5889 INP_WLOCK_ASSERT(tp->t_inpcb); 5890 return (0); 5891 } 5892 5893 /* 5894 * Here nothing is really faster, its just that we 5895 * have broken out the fast-data path also just like 5896 * the fast-ack. 5897 */ 5898 static int 5899 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 5900 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 5901 uint32_t tiwin, int32_t nxt_pkt) 5902 { 5903 int32_t nsegs; 5904 int32_t newsize = 0; /* automatic sockbuf scaling */ 5905 struct tcp_rack *rack; 5906 #ifdef NETFLIX_SB_LIMITS 5907 u_int mcnt, appended; 5908 #endif 5909 #ifdef TCPDEBUG 5910 /* 5911 * The size of tcp_saveipgen must be the size of the max ip header, 5912 * now IPv6. 5913 */ 5914 u_char tcp_saveipgen[IP6_HDR_LEN]; 5915 struct tcphdr tcp_savetcp; 5916 short ostate = 0; 5917 5918 #endif 5919 /* 5920 * If last ACK falls within this segment's sequence numbers, record 5921 * the timestamp. NOTE that the test is modified according to the 5922 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
5923 */ 5924 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 5925 return (0); 5926 } 5927 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 5928 return (0); 5929 } 5930 if (tiwin && tiwin != tp->snd_wnd) { 5931 return (0); 5932 } 5933 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 5934 return (0); 5935 } 5936 if (__predict_false((to->to_flags & TOF_TS) && 5937 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 5938 return (0); 5939 } 5940 if (__predict_false((th->th_ack != tp->snd_una))) { 5941 return (0); 5942 } 5943 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 5944 return (0); 5945 } 5946 if ((to->to_flags & TOF_TS) != 0 && 5947 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 5948 tp->ts_recent_age = tcp_ts_getticks(); 5949 tp->ts_recent = to->to_tsval; 5950 } 5951 rack = (struct tcp_rack *)tp->t_fb_ptr; 5952 /* 5953 * This is a pure, in-sequence data packet with nothing on the 5954 * reassembly queue and we have enough buffer space to take it. 5955 */ 5956 nsegs = max(1, m->m_pkthdr.lro_nsegs); 5957 5958 #ifdef NETFLIX_SB_LIMITS 5959 if (so->so_rcv.sb_shlim) { 5960 mcnt = m_memcnt(m); 5961 appended = 0; 5962 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 5963 CFO_NOSLEEP, NULL) == false) { 5964 counter_u64_add(tcp_sb_shlim_fails, 1); 5965 m_freem(m); 5966 return (1); 5967 } 5968 } 5969 #endif 5970 /* Clean receiver SACK report if present */ 5971 if (tp->rcv_numsacks) 5972 tcp_clean_sackreport(tp); 5973 TCPSTAT_INC(tcps_preddat); 5974 tp->rcv_nxt += tlen; 5975 /* 5976 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 5977 */ 5978 tp->snd_wl1 = th->th_seq; 5979 /* 5980 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 5981 */ 5982 tp->rcv_up = tp->rcv_nxt; 5983 TCPSTAT_ADD(tcps_rcvpack, nsegs); 5984 TCPSTAT_ADD(tcps_rcvbyte, tlen); 5985 #ifdef TCPDEBUG 5986 if (so->so_options & SO_DEBUG) 5987 tcp_trace(TA_INPUT, ostate, tp, 5988 (void *)tcp_saveipgen, &tcp_savetcp, 0); 5989 #endif 5990 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 5991 5992 /* Add data to socket buffer. */ 5993 SOCKBUF_LOCK(&so->so_rcv); 5994 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5995 m_freem(m); 5996 } else { 5997 /* 5998 * Set new socket buffer size. Give up when limit is 5999 * reached. 6000 */ 6001 if (newsize) 6002 if (!sbreserve_locked(&so->so_rcv, 6003 newsize, so, NULL)) 6004 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 6005 m_adj(m, drop_hdrlen); /* delayed header drop */ 6006 #ifdef NETFLIX_SB_LIMITS 6007 appended = 6008 #endif 6009 sbappendstream_locked(&so->so_rcv, m, 0); 6010 ctf_calc_rwin(so, tp); 6011 } 6012 /* NB: sorwakeup_locked() does an implicit unlock. */ 6013 sorwakeup_locked(so); 6014 #ifdef NETFLIX_SB_LIMITS 6015 if (so->so_rcv.sb_shlim && mcnt != appended) 6016 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 6017 #endif 6018 if (DELAY_ACK(tp, tlen)) { 6019 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 6020 tp->t_flags |= TF_DELACK; 6021 } else { 6022 tp->t_flags |= TF_ACKNOW; 6023 rack->r_wanted_output++; 6024 } 6025 if ((tp->snd_una == tp->snd_max) && rack_use_sack_filter) 6026 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6027 return (1); 6028 } 6029 6030 /* 6031 * This subfunction is used to try to highly optimize the 6032 * fast path. We again allow window updates that are 6033 * in sequence to remain in the fast-path. We also add 6034 * in the __predict's to attempt to help the compiler. 6035 * Note that if we return a 0, then we can *not* process 6036 * it and the caller should push the packet into the 6037 * slow-path. 
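 * Roughly, anything interesting sends us to the slow
 * path: an old or too-new ack, an outstanding
 * retransmission, a zero window, pending SYN/FIN
 * processing, a stale timestamp, being in recovery, or
 * having SACKed data on the scoreboard. Only a plain
 * in-window pure ACK is handled here.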
6038 */ 6039 static int 6040 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 6041 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6042 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 6043 { 6044 int32_t acked; 6045 int32_t nsegs; 6046 6047 #ifdef TCPDEBUG 6048 /* 6049 * The size of tcp_saveipgen must be the size of the max ip header, 6050 * now IPv6. 6051 */ 6052 u_char tcp_saveipgen[IP6_HDR_LEN]; 6053 struct tcphdr tcp_savetcp; 6054 short ostate = 0; 6055 6056 #endif 6057 struct tcp_rack *rack; 6058 6059 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 6060 /* Old ack, behind (or duplicate to) the last one rcv'd */ 6061 return (0); 6062 } 6063 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 6064 /* Above what we have sent? */ 6065 return (0); 6066 } 6067 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 6068 /* We are retransmitting */ 6069 return (0); 6070 } 6071 if (__predict_false(tiwin == 0)) { 6072 /* zero window */ 6073 return (0); 6074 } 6075 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 6076 /* We need a SYN or a FIN, unlikely.. */ 6077 return (0); 6078 } 6079 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 6080 /* Timestamp is behind .. old ack with seq wrap? */ 6081 return (0); 6082 } 6083 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 6084 /* Still recovering */ 6085 return (0); 6086 } 6087 rack = (struct tcp_rack *)tp->t_fb_ptr; 6088 if (rack->r_ctl.rc_sacked) { 6089 /* We have sack holes on our scoreboard */ 6090 return (0); 6091 } 6092 /* Ok if we reach here, we can process a fast-ack */ 6093 nsegs = max(1, m->m_pkthdr.lro_nsegs); 6094 rack_log_ack(tp, to, th); 6095 /* 6096 * We made progress, clear the tlp 6097 * out flag so we could start a TLP 6098 * again. 6099 */ 6100 rack->r_ctl.rc_tlp_rtx_out = 0; 6101 /* Did the window get updated? */ 6102 if (tiwin != tp->snd_wnd) { 6103 tp->snd_wnd = tiwin; 6104 tp->snd_wl1 = th->th_seq; 6105 if (tp->snd_wnd > tp->max_sndwnd) 6106 tp->max_sndwnd = tp->snd_wnd; 6107 } 6108 /* Do we exit persists? */ 6109 if ((rack->rc_in_persist != 0) && 6110 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 6111 rack->r_ctl.rc_pace_min_segs))) { 6112 rack_exit_persist(tp, rack); 6113 } 6114 /* Do we enter persists? */ 6115 if ((rack->rc_in_persist == 0) && 6116 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 6117 TCPS_HAVEESTABLISHED(tp->t_state) && 6118 (tp->snd_max == tp->snd_una) && 6119 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 6120 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 6121 /* 6122 * Here the rwnd is less than 6123 * the pacing size, we are established, 6124 * nothing is outstanding, and there is 6125 * data to send. Enter persists. 6126 */ 6127 tp->snd_nxt = tp->snd_una; 6128 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 6129 } 6130 /* 6131 * If last ACK falls within this segment's sequence numbers, record 6132 * the timestamp. NOTE that the test is modified according to the 6133 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 6134 */ 6135 if ((to->to_flags & TOF_TS) != 0 && 6136 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 6137 tp->ts_recent_age = tcp_ts_getticks(); 6138 tp->ts_recent = to->to_tsval; 6139 } 6140 /* 6141 * This is a pure ack for outstanding data. 6142 */ 6143 TCPSTAT_INC(tcps_predack); 6144 6145 /* 6146 * "bad retransmit" recovery. 
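 * As in the full ack path above: if this ACK arrives
 * within the bad-retransmit window after our first RTO
 * retransmission, that retransmission was very likely
 * spurious, and CC_RTO_ERR is used to back the congestion
 * reaction out again.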
6147 */ 6148 if (tp->t_flags & TF_PREVVALID) { 6149 tp->t_flags &= ~TF_PREVVALID; 6150 if (tp->t_rxtshift == 1 && 6151 (int)(ticks - tp->t_badrxtwin) < 0) 6152 rack_cong_signal(tp, th, CC_RTO_ERR); 6153 } 6154 /* 6155 * Recalculate the transmit timer / rtt. 6156 * 6157 * Some boxes send broken timestamp replies during the SYN+ACK 6158 * phase, ignore timestamps of 0 or we could calculate a huge RTT 6159 * and blow up the retransmit timer. 6160 */ 6161 acked = BYTES_THIS_ACK(tp, th); 6162 6163 #ifdef TCP_HHOOK 6164 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 6165 hhook_run_tcp_est_in(tp, th, to); 6166 #endif 6167 6168 TCPSTAT_ADD(tcps_rcvackpack, nsegs); 6169 TCPSTAT_ADD(tcps_rcvackbyte, acked); 6170 sbdrop(&so->so_snd, acked); 6171 /* 6172 * Let the congestion control algorithm update congestion control 6173 * related information. This typically means increasing the 6174 * congestion window. 6175 */ 6176 rack_ack_received(tp, rack, th, nsegs, CC_ACK, 0); 6177 6178 tp->snd_una = th->th_ack; 6179 if (tp->snd_wnd < ctf_outstanding(tp)) { 6180 /* The peer collapsed the window */ 6181 rack_collapsed_window(rack); 6182 } else if (rack->rc_has_collapsed) 6183 rack_un_collapse_window(rack); 6184 6185 /* 6186 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 6187 */ 6188 tp->snd_wl2 = th->th_ack; 6189 tp->t_dupacks = 0; 6190 m_freem(m); 6191 /* ND6_HINT(tp); *//* Some progress has been made. */ 6192 6193 /* 6194 * If all outstanding data are acked, stop retransmit timer, 6195 * otherwise restart timer using current (possibly backed-off) 6196 * value. If process is waiting for space, wakeup/selwakeup/signal. 6197 * If data are ready to send, let tcp_output decide between more 6198 * output or persist. 6199 */ 6200 #ifdef TCPDEBUG 6201 if (so->so_options & SO_DEBUG) 6202 tcp_trace(TA_INPUT, ostate, tp, 6203 (void *)tcp_saveipgen, 6204 &tcp_savetcp, 0); 6205 #endif 6206 if (tp->snd_una == tp->snd_max) { 6207 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 6208 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 6209 tp->t_acktime = 0; 6210 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 6211 } 6212 /* Wake up the socket if we have room to write more */ 6213 sowwakeup(so); 6214 if (sbavail(&so->so_snd)) { 6215 rack->r_wanted_output++; 6216 } 6217 return (1); 6218 } 6219 6220 /* 6221 * Return value of 1, the TCB is unlocked and most 6222 * likely gone, return value of 0, the TCP is still 6223 * locked. 6224 */ 6225 static int 6226 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 6227 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6228 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6229 { 6230 int32_t ret_val = 0; 6231 int32_t todrop; 6232 int32_t ourfinisacked = 0; 6233 struct tcp_rack *rack; 6234 6235 ctf_calc_rwin(so, tp); 6236 /* 6237 * If the state is SYN_SENT: if seg contains an ACK, but not for our 6238 * SYN, drop the input. if seg contains a RST, then drop the 6239 * connection. if seg does not contain SYN, then drop it. Otherwise 6240 * this is an acceptable SYN segment initialize tp->rcv_nxt and 6241 * tp->irs if seg contains ack then advance tp->snd_una if seg 6242 * contains an ECE and ECN support is enabled, the stream is ECN 6243 * capable. 
if SYN has been acked change to ESTABLISHED else 6244 * SYN_RCVD state arrange for segment to be acked (eventually) 6245 * continue processing rest of data/controls, beginning with URG 6246 */ 6247 if ((thflags & TH_ACK) && 6248 (SEQ_LEQ(th->th_ack, tp->iss) || 6249 SEQ_GT(th->th_ack, tp->snd_max))) { 6250 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6251 return (1); 6252 } 6253 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 6254 TCP_PROBE5(connect__refused, NULL, tp, 6255 mtod(m, const char *), tp, th); 6256 tp = tcp_drop(tp, ECONNREFUSED); 6257 ctf_do_drop(m, tp); 6258 return (1); 6259 } 6260 if (thflags & TH_RST) { 6261 ctf_do_drop(m, tp); 6262 return (1); 6263 } 6264 if (!(thflags & TH_SYN)) { 6265 ctf_do_drop(m, tp); 6266 return (1); 6267 } 6268 tp->irs = th->th_seq; 6269 tcp_rcvseqinit(tp); 6270 rack = (struct tcp_rack *)tp->t_fb_ptr; 6271 if (thflags & TH_ACK) { 6272 int tfo_partial = 0; 6273 6274 TCPSTAT_INC(tcps_connects); 6275 soisconnected(so); 6276 #ifdef MAC 6277 mac_socketpeer_set_from_mbuf(m, so); 6278 #endif 6279 /* Do window scaling on this connection? */ 6280 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 6281 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 6282 tp->rcv_scale = tp->request_r_scale; 6283 } 6284 tp->rcv_adv += min(tp->rcv_wnd, 6285 TCP_MAXWIN << tp->rcv_scale); 6286 /* 6287 * If not all the data that was sent in the TFO SYN 6288 * has been acked, resend the remainder right away. 6289 */ 6290 if (IS_FASTOPEN(tp->t_flags) && 6291 (tp->snd_una != tp->snd_max)) { 6292 tp->snd_nxt = th->th_ack; 6293 tfo_partial = 1; 6294 } 6295 /* 6296 * If there's data, delay ACK; if there's also a FIN ACKNOW 6297 * will be turned on later. 6298 */ 6299 if (DELAY_ACK(tp, tlen) && tlen != 0 && (tfo_partial == 0)) { 6300 rack_timer_cancel(tp, rack, 6301 rack->r_ctl.rc_rcvtime, __LINE__); 6302 tp->t_flags |= TF_DELACK; 6303 } else { 6304 rack->r_wanted_output++; 6305 tp->t_flags |= TF_ACKNOW; 6306 } 6307 6308 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 6309 V_tcp_do_ecn) { 6310 tp->t_flags |= TF_ECN_PERMIT; 6311 TCPSTAT_INC(tcps_ecn_shs); 6312 } 6313 if (SEQ_GT(th->th_ack, tp->snd_una)) { 6314 /* 6315 * We advance snd_una for the 6316 * fast open case. If th_ack is 6317 * acknowledging data beyond 6318 * snd_una we can't just call 6319 * ack-processing since the 6320 * data stream in our send-map 6321 * will start at snd_una + 1 (one 6322 * beyond the SYN). If its just 6323 * equal we don't need to do that 6324 * and there is no send_map. 6325 */ 6326 tp->snd_una++; 6327 } 6328 /* 6329 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 6330 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 6331 */ 6332 tp->t_starttime = ticks; 6333 if (tp->t_flags & TF_NEEDFIN) { 6334 tcp_state_change(tp, TCPS_FIN_WAIT_1); 6335 tp->t_flags &= ~TF_NEEDFIN; 6336 thflags &= ~TH_SYN; 6337 } else { 6338 tcp_state_change(tp, TCPS_ESTABLISHED); 6339 TCP_PROBE5(connect__established, NULL, tp, 6340 mtod(m, const char *), tp, th); 6341 cc_conn_init(tp); 6342 } 6343 } else { 6344 /* 6345 * Received initial SYN in SYN-SENT[*] state => simultaneous 6346 * open. If segment contains CC option and there is a 6347 * cached CC, apply TAO test. If it succeeds, connection is * 6348 * half-synchronized. Otherwise, do 3-way handshake: 6349 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 6350 * there was no CC option, clear cached CC value. 
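 * The CC/TAO handling described above is largely
 * historical; what actually happens below is that we mark
 * the connection half-synchronized (TF_NEEDSYN), force an
 * ACK, and move to SYN_RECEIVED to finish the three-way
 * handshake.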
6351 */ 6352 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 6353 tcp_state_change(tp, TCPS_SYN_RECEIVED); 6354 } 6355 INP_WLOCK_ASSERT(tp->t_inpcb); 6356 /* 6357 * Advance th->th_seq to correspond to first data byte. If data, 6358 * trim to stay within window, dropping FIN if necessary. 6359 */ 6360 th->th_seq++; 6361 if (tlen > tp->rcv_wnd) { 6362 todrop = tlen - tp->rcv_wnd; 6363 m_adj(m, -todrop); 6364 tlen = tp->rcv_wnd; 6365 thflags &= ~TH_FIN; 6366 TCPSTAT_INC(tcps_rcvpackafterwin); 6367 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 6368 } 6369 tp->snd_wl1 = th->th_seq - 1; 6370 tp->rcv_up = th->th_seq; 6371 /* 6372 * Client side of transaction: already sent SYN and data. If the 6373 * remote host used T/TCP to validate the SYN, our data will be 6374 * ACK'd; if so, enter normal data segment processing in the middle 6375 * of step 5, ack processing. Otherwise, goto step 6. 6376 */ 6377 if (thflags & TH_ACK) { 6378 /* For syn-sent we need to possibly update the rtt */ 6379 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 6380 uint32_t t; 6381 6382 t = tcp_ts_getticks() - to->to_tsecr; 6383 if (!tp->t_rttlow || tp->t_rttlow > t) 6384 tp->t_rttlow = t; 6385 tcp_rack_xmit_timer(rack, t + 1); 6386 tcp_rack_xmit_timer_commit(rack, tp); 6387 } 6388 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 6389 return (ret_val); 6390 /* We may have changed to FIN_WAIT_1 above */ 6391 if (tp->t_state == TCPS_FIN_WAIT_1) { 6392 /* 6393 * In FIN_WAIT_1 STATE in addition to the processing 6394 * for the ESTABLISHED state if our FIN is now 6395 * acknowledged then enter FIN_WAIT_2. 6396 */ 6397 if (ourfinisacked) { 6398 /* 6399 * If we can't receive any more data, then 6400 * closing user can proceed. Starting the 6401 * timer is contrary to the specification, 6402 * but if we don't get a FIN we'll hang 6403 * forever. 6404 * 6405 * XXXjl: we should release the tp also, and 6406 * use a compressed state. 6407 */ 6408 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6409 soisdisconnected(so); 6410 tcp_timer_activate(tp, TT_2MSL, 6411 (tcp_fast_finwait2_recycle ? 6412 tcp_finwait2_timeout : 6413 TP_MAXIDLE(tp))); 6414 } 6415 tcp_state_change(tp, TCPS_FIN_WAIT_2); 6416 } 6417 } 6418 } 6419 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6420 tiwin, thflags, nxt_pkt)); 6421 } 6422 6423 /* 6424 * Return value of 1, the TCB is unlocked and most 6425 * likely gone, return value of 0, the TCP is still 6426 * locked. 6427 */ 6428 static int 6429 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 6430 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6431 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6432 { 6433 struct tcp_rack *rack; 6434 int32_t ret_val = 0; 6435 int32_t ourfinisacked = 0; 6436 6437 ctf_calc_rwin(so, tp); 6438 if ((thflags & TH_ACK) && 6439 (SEQ_LEQ(th->th_ack, tp->snd_una) || 6440 SEQ_GT(th->th_ack, tp->snd_max))) { 6441 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6442 return (1); 6443 } 6444 rack = (struct tcp_rack *)tp->t_fb_ptr; 6445 if (IS_FASTOPEN(tp->t_flags)) { 6446 /* 6447 * When a TFO connection is in SYN_RECEIVED, the 6448 * only valid packets are the initial SYN, a 6449 * retransmit/copy of the initial SYN (possibly with 6450 * a subset of the original data), a valid ACK, a 6451 * FIN, or a RST. 
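 * Everything else is handled just below: a SYN|ACK here
 * makes no sense and draws a reset, a bare retransmitted
 * SYN is quietly dropped while a retransmit, TLP or RACK
 * timer is pending, and a segment carrying none of ACK,
 * FIN or RST is dropped as well.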
6452 */ 6453 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 6454 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6455 return (1); 6456 } else if (thflags & TH_SYN) { 6457 /* non-initial SYN is ignored */ 6458 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 6459 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 6460 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 6461 ctf_do_drop(m, NULL); 6462 return (0); 6463 } 6464 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 6465 ctf_do_drop(m, NULL); 6466 return (0); 6467 } 6468 } 6469 if ((thflags & TH_RST) || 6470 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6471 return (ctf_process_rst(m, th, so, tp)); 6472 /* 6473 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6474 * it's less than ts_recent, drop it. 6475 */ 6476 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6477 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6478 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6479 return (ret_val); 6480 } 6481 /* 6482 * In the SYN-RECEIVED state, validate that the packet belongs to 6483 * this connection before trimming the data to fit the receive 6484 * window. Check the sequence number versus IRS since we know the 6485 * sequence numbers haven't wrapped. This is a partial fix for the 6486 * "LAND" DoS attack. 6487 */ 6488 if (SEQ_LT(th->th_seq, tp->irs)) { 6489 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6490 return (1); 6491 } 6492 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6493 return (ret_val); 6494 } 6495 /* 6496 * If last ACK falls within this segment's sequence numbers, record 6497 * its timestamp. NOTE: 1) That the test incorporates suggestions 6498 * from the latest proposal of the tcplw@cray.com list (Braden 6499 * 1993/04/26). 2) That updating only on newer timestamps interferes 6500 * with our earlier PAWS tests, so this check should be solely 6501 * predicated on the sequence space of this segment. 3) That we 6502 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6503 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6504 * SEG.Len, This modified check allows us to overcome RFC1323's 6505 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6506 * p.869. In such cases, we can still calculate the RTT correctly 6507 * when RCV.NXT == Last.ACK.Sent. 6508 */ 6509 if ((to->to_flags & TOF_TS) != 0 && 6510 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6511 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6512 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6513 tp->ts_recent_age = tcp_ts_getticks(); 6514 tp->ts_recent = to->to_tsval; 6515 } 6516 tp->snd_wnd = tiwin; 6517 /* 6518 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6519 * is on (half-synchronized state), then queue data for later 6520 * processing; else drop segment and return. 6521 */ 6522 if ((thflags & TH_ACK) == 0) { 6523 if (IS_FASTOPEN(tp->t_flags)) { 6524 cc_conn_init(tp); 6525 } 6526 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6527 tiwin, thflags, nxt_pkt)); 6528 } 6529 TCPSTAT_INC(tcps_connects); 6530 soisconnected(so); 6531 /* Do window scaling? 
*/ 6532 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 6533 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 6534 tp->rcv_scale = tp->request_r_scale; 6535 } 6536 /* 6537 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 6538 * FIN-WAIT-1 6539 */ 6540 tp->t_starttime = ticks; 6541 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 6542 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 6543 tp->t_tfo_pending = NULL; 6544 6545 /* 6546 * Account for the ACK of our SYN prior to 6547 * regular ACK processing below. 6548 */ 6549 tp->snd_una++; 6550 } 6551 if (tp->t_flags & TF_NEEDFIN) { 6552 tcp_state_change(tp, TCPS_FIN_WAIT_1); 6553 tp->t_flags &= ~TF_NEEDFIN; 6554 } else { 6555 tcp_state_change(tp, TCPS_ESTABLISHED); 6556 TCP_PROBE5(accept__established, NULL, tp, 6557 mtod(m, const char *), tp, th); 6558 /* 6559 * TFO connections call cc_conn_init() during SYN 6560 * processing. Calling it again here for such connections 6561 * is not harmless as it would undo the snd_cwnd reduction 6562 * that occurs when a TFO SYN|ACK is retransmitted. 6563 */ 6564 if (!IS_FASTOPEN(tp->t_flags)) 6565 cc_conn_init(tp); 6566 } 6567 /* 6568 * If segment contains data or ACK, will call tcp_reass() later; if 6569 * not, do so now to pass queued data to user. 6570 */ 6571 if (tlen == 0 && (thflags & TH_FIN) == 0) 6572 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 6573 (struct mbuf *)0); 6574 tp->snd_wl1 = th->th_seq - 1; 6575 /* For syn-recv we need to possibly update the rtt */ 6576 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 6577 uint32_t t; 6578 6579 t = tcp_ts_getticks() - to->to_tsecr; 6580 if (!tp->t_rttlow || tp->t_rttlow > t) 6581 tp->t_rttlow = t; 6582 tcp_rack_xmit_timer(rack, t + 1); 6583 tcp_rack_xmit_timer_commit(rack, tp); 6584 } 6585 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6586 return (ret_val); 6587 } 6588 if (tp->t_state == TCPS_FIN_WAIT_1) { 6589 /* We could have went to FIN_WAIT_1 (or EST) above */ 6590 /* 6591 * In FIN_WAIT_1 STATE in addition to the processing for the 6592 * ESTABLISHED state if our FIN is now acknowledged then 6593 * enter FIN_WAIT_2. 6594 */ 6595 if (ourfinisacked) { 6596 /* 6597 * If we can't receive any more data, then closing 6598 * user can proceed. Starting the timer is contrary 6599 * to the specification, but if we don't get a FIN 6600 * we'll hang forever. 6601 * 6602 * XXXjl: we should release the tp also, and use a 6603 * compressed state. 6604 */ 6605 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6606 soisdisconnected(so); 6607 tcp_timer_activate(tp, TT_2MSL, 6608 (tcp_fast_finwait2_recycle ? 6609 tcp_finwait2_timeout : 6610 TP_MAXIDLE(tp))); 6611 } 6612 tcp_state_change(tp, TCPS_FIN_WAIT_2); 6613 } 6614 } 6615 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6616 tiwin, thflags, nxt_pkt)); 6617 } 6618 6619 /* 6620 * Return value of 1, the TCB is unlocked and most 6621 * likely gone, return value of 0, the TCP is still 6622 * locked. 6623 */ 6624 static int 6625 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 6626 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6627 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6628 { 6629 int32_t ret_val = 0; 6630 6631 /* 6632 * Header prediction: check for the two common cases of a 6633 * uni-directional data xfer. If the packet has no control flags, 6634 * is in-sequence, the window didn't change and we're not 6635 * retransmitting, it's a candidate. 
If the length is zero and the 6636 * ack moved forward, we're the sender side of the xfer. Just free 6637 * the data acked & wake any higher level process that was blocked 6638 * waiting for space. If the length is non-zero and the ack didn't 6639 * move, we're the receiver side. If we're getting packets in-order 6640 * (the reassembly queue is empty), add the data to the socket 6641 * buffer and note that we need a delayed ack. Make sure that the 6642 * hidden state-flags are also off. Since we check for 6643 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN. 6644 */ 6645 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 6646 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) && 6647 __predict_true(SEGQ_EMPTY(tp)) && 6648 __predict_true(th->th_seq == tp->rcv_nxt)) { 6649 struct tcp_rack *rack; 6650 6651 rack = (struct tcp_rack *)tp->t_fb_ptr; 6652 if (tlen == 0) { 6653 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 6654 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 6655 return (0); 6656 } 6657 } else { 6658 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 6659 tiwin, nxt_pkt)) { 6660 return (0); 6661 } 6662 } 6663 } 6664 ctf_calc_rwin(so, tp); 6665 6666 if ((thflags & TH_RST) || 6667 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6668 return (ctf_process_rst(m, th, so, tp)); 6669 6670 /* 6671 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6672 * synchronized state. 6673 */ 6674 if (thflags & TH_SYN) { 6675 ctf_challenge_ack(m, th, tp, &ret_val); 6676 return (ret_val); 6677 } 6678 /* 6679 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6680 * it's less than ts_recent, drop it. 6681 */ 6682 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6683 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6684 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6685 return (ret_val); 6686 } 6687 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6688 return (ret_val); 6689 } 6690 /* 6691 * If last ACK falls within this segment's sequence numbers, record 6692 * its timestamp. NOTE: 1) That the test incorporates suggestions 6693 * from the latest proposal of the tcplw@cray.com list (Braden 6694 * 1993/04/26). 2) That updating only on newer timestamps interferes 6695 * with our earlier PAWS tests, so this check should be solely 6696 * predicated on the sequence space of this segment. 3) That we 6697 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6698 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6699 * SEG.Len, This modified check allows us to overcome RFC1323's 6700 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6701 * p.869. In such cases, we can still calculate the RTT correctly 6702 * when RCV.NXT == Last.ACK.Sent. 6703 */ 6704 if ((to->to_flags & TOF_TS) != 0 && 6705 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6706 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6707 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6708 tp->ts_recent_age = tcp_ts_getticks(); 6709 tp->ts_recent = to->to_tsval; 6710 } 6711 /* 6712 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6713 * is on (half-synchronized state), then queue data for later 6714 * processing; else drop segment and return.
6715 */ 6716 if ((thflags & TH_ACK) == 0) { 6717 if (tp->t_flags & TF_NEEDSYN) { 6718 6719 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6720 tiwin, thflags, nxt_pkt)); 6721 6722 } else if (tp->t_flags & TF_ACKNOW) { 6723 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6724 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 6725 return (ret_val); 6726 } else { 6727 ctf_do_drop(m, NULL); 6728 return (0); 6729 } 6730 } 6731 /* 6732 * Ack processing. 6733 */ 6734 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 6735 return (ret_val); 6736 } 6737 if (sbavail(&so->so_snd)) { 6738 if (rack_progress_timeout_check(tp)) { 6739 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6740 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6741 return (1); 6742 } 6743 } 6744 /* State changes only happen in rack_process_data() */ 6745 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6746 tiwin, thflags, nxt_pkt)); 6747 } 6748 6749 /* 6750 * Return value of 1, the TCB is unlocked and most 6751 * likely gone, return value of 0, the TCP is still 6752 * locked. 6753 */ 6754 static int 6755 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 6756 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6757 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6758 { 6759 int32_t ret_val = 0; 6760 6761 ctf_calc_rwin(so, tp); 6762 if ((thflags & TH_RST) || 6763 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6764 return (ctf_process_rst(m, th, so, tp)); 6765 /* 6766 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6767 * synchronized state. 6768 */ 6769 if (thflags & TH_SYN) { 6770 ctf_challenge_ack(m, th, tp, &ret_val); 6771 return (ret_val); 6772 } 6773 /* 6774 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6775 * it's less than ts_recent, drop it. 6776 */ 6777 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6778 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6779 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6780 return (ret_val); 6781 } 6782 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6783 return (ret_val); 6784 } 6785 /* 6786 * If last ACK falls within this segment's sequence numbers, record 6787 * its timestamp. NOTE: 1) That the test incorporates suggestions 6788 * from the latest proposal of the tcplw@cray.com list (Braden 6789 * 1993/04/26). 2) That updating only on newer timestamps interferes 6790 * with our earlier PAWS tests, so this check should be solely 6791 * predicated on the sequence space of this segment. 3) That we 6792 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6793 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6794 * SEG.Len, This modified check allows us to overcome RFC1323's 6795 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6796 * p.869. In such cases, we can still calculate the RTT correctly 6797 * when RCV.NXT == Last.ACK.Sent. 6798 */ 6799 if ((to->to_flags & TOF_TS) != 0 && 6800 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6801 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6802 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6803 tp->ts_recent_age = tcp_ts_getticks(); 6804 tp->ts_recent = to->to_tsval; 6805 } 6806 /* 6807 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6808 * is on (half-synchronized state), then queue data for later 6809 * processing; else drop segment and return. 
6810 */ 6811 if ((thflags & TH_ACK) == 0) { 6812 if (tp->t_flags & TF_NEEDSYN) { 6813 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6814 tiwin, thflags, nxt_pkt)); 6815 6816 } else if (tp->t_flags & TF_ACKNOW) { 6817 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6818 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 6819 return (ret_val); 6820 } else { 6821 ctf_do_drop(m, NULL); 6822 return (0); 6823 } 6824 } 6825 /* 6826 * Ack processing. 6827 */ 6828 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 6829 return (ret_val); 6830 } 6831 if (sbavail(&so->so_snd)) { 6832 if (rack_progress_timeout_check(tp)) { 6833 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6834 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6835 return (1); 6836 } 6837 } 6838 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6839 tiwin, thflags, nxt_pkt)); 6840 } 6841 6842 static int 6843 rack_check_data_after_close(struct mbuf *m, 6844 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 6845 { 6846 struct tcp_rack *rack; 6847 6848 rack = (struct tcp_rack *)tp->t_fb_ptr; 6849 if (rack->rc_allow_data_af_clo == 0) { 6850 close_now: 6851 tp = tcp_close(tp); 6852 TCPSTAT_INC(tcps_rcvafterclose); 6853 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 6854 return (1); 6855 } 6856 if (sbavail(&so->so_snd) == 0) 6857 goto close_now; 6858 /* Ok we allow data that is ignored and a followup reset */ 6859 tp->rcv_nxt = th->th_seq + *tlen; 6860 tp->t_flags2 |= TF2_DROP_AF_DATA; 6861 rack->r_wanted_output = 1; 6862 *tlen = 0; 6863 return (0); 6864 } 6865 6866 /* 6867 * Return value of 1, the TCB is unlocked and most 6868 * likely gone, return value of 0, the TCP is still 6869 * locked. 6870 */ 6871 static int 6872 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 6873 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6874 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6875 { 6876 int32_t ret_val = 0; 6877 int32_t ourfinisacked = 0; 6878 6879 ctf_calc_rwin(so, tp); 6880 6881 if ((thflags & TH_RST) || 6882 (tp->t_fin_is_rst && (thflags & TH_FIN))) 6883 return (ctf_process_rst(m, th, so, tp)); 6884 /* 6885 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 6886 * synchronized state. 6887 */ 6888 if (thflags & TH_SYN) { 6889 ctf_challenge_ack(m, th, tp, &ret_val); 6890 return (ret_val); 6891 } 6892 /* 6893 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 6894 * it's less than ts_recent, drop it. 6895 */ 6896 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 6897 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 6898 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 6899 return (ret_val); 6900 } 6901 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 6902 return (ret_val); 6903 } 6904 /* 6905 * If new data are received on a connection after the user processes 6906 * are gone, then RST the other end. 6907 */ 6908 if ((so->so_state & SS_NOFDREF) && tlen) { 6909 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 6910 return (1); 6911 } 6912 /* 6913 * If last ACK falls within this segment's sequence numbers, record 6914 * its timestamp. NOTE: 1) That the test incorporates suggestions 6915 * from the latest proposal of the tcplw@cray.com list (Braden 6916 * 1993/04/26). 
2) That updating only on newer timestamps interferes 6917 * with our earlier PAWS tests, so this check should be solely 6918 * predicated on the sequence space of this segment. 3) That we 6919 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 6920 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 6921 * SEG.Len, This modified check allows us to overcome RFC1323's 6922 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 6923 * p.869. In such cases, we can still calculate the RTT correctly 6924 * when RCV.NXT == Last.ACK.Sent. 6925 */ 6926 if ((to->to_flags & TOF_TS) != 0 && 6927 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 6928 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 6929 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 6930 tp->ts_recent_age = tcp_ts_getticks(); 6931 tp->ts_recent = to->to_tsval; 6932 } 6933 /* 6934 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 6935 * is on (half-synchronized state), then queue data for later 6936 * processing; else drop segment and return. 6937 */ 6938 if ((thflags & TH_ACK) == 0) { 6939 if (tp->t_flags & TF_NEEDSYN) { 6940 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6941 tiwin, thflags, nxt_pkt)); 6942 } else if (tp->t_flags & TF_ACKNOW) { 6943 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 6944 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 6945 return (ret_val); 6946 } else { 6947 ctf_do_drop(m, NULL); 6948 return (0); 6949 } 6950 } 6951 /* 6952 * Ack processing. 6953 */ 6954 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 6955 return (ret_val); 6956 } 6957 if (ourfinisacked) { 6958 /* 6959 * If we can't receive any more data, then closing user can 6960 * proceed. Starting the timer is contrary to the 6961 * specification, but if we don't get a FIN we'll hang 6962 * forever. 6963 * 6964 * XXXjl: we should release the tp also, and use a 6965 * compressed state. 6966 */ 6967 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6968 soisdisconnected(so); 6969 tcp_timer_activate(tp, TT_2MSL, 6970 (tcp_fast_finwait2_recycle ? 6971 tcp_finwait2_timeout : 6972 TP_MAXIDLE(tp))); 6973 } 6974 tcp_state_change(tp, TCPS_FIN_WAIT_2); 6975 } 6976 if (sbavail(&so->so_snd)) { 6977 if (rack_progress_timeout_check(tp)) { 6978 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6979 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 6980 return (1); 6981 } 6982 } 6983 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 6984 tiwin, thflags, nxt_pkt)); 6985 } 6986 6987 /* 6988 * Return value of 1, the TCB is unlocked and most 6989 * likely gone, return value of 0, the TCP is still 6990 * locked. 6991 */ 6992 static int 6993 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 6994 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 6995 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 6996 { 6997 int32_t ret_val = 0; 6998 int32_t ourfinisacked = 0; 6999 7000 ctf_calc_rwin(so, tp); 7001 7002 if ((thflags & TH_RST) || 7003 (tp->t_fin_is_rst && (thflags & TH_FIN))) 7004 return (ctf_process_rst(m, th, so, tp)); 7005 /* 7006 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 7007 * synchronized state. 7008 */ 7009 if (thflags & TH_SYN) { 7010 ctf_challenge_ack(m, th, tp, &ret_val); 7011 return (ret_val); 7012 } 7013 /* 7014 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 7015 * it's less than ts_recent, drop it. 
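	 * (The actual decision is delegated to ctf_ts_check(), which
	 * presumably mirrors the standard PAWS handling: drop the segment
	 * after ACKing unless the timestamp appears to have wrapped after
	 * a long idle period.)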
7016 */ 7017 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 7018 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 7019 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 7020 return (ret_val); 7021 } 7022 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 7023 return (ret_val); 7024 } 7025 /* 7026 * If new data are received on a connection after the user processes 7027 * are gone, then RST the other end. 7028 */ 7029 if ((so->so_state & SS_NOFDREF) && tlen) { 7030 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 7031 return (1); 7032 } 7033 /* 7034 * If last ACK falls within this segment's sequence numbers, record 7035 * its timestamp. NOTE: 1) That the test incorporates suggestions 7036 * from the latest proposal of the tcplw@cray.com list (Braden 7037 * 1993/04/26). 2) That updating only on newer timestamps interferes 7038 * with our earlier PAWS tests, so this check should be solely 7039 * predicated on the sequence space of this segment. 3) That we 7040 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 7041 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 7042 * SEG.Len, This modified check allows us to overcome RFC1323's 7043 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 7044 * p.869. In such cases, we can still calculate the RTT correctly 7045 * when RCV.NXT == Last.ACK.Sent. 7046 */ 7047 if ((to->to_flags & TOF_TS) != 0 && 7048 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 7049 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 7050 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 7051 tp->ts_recent_age = tcp_ts_getticks(); 7052 tp->ts_recent = to->to_tsval; 7053 } 7054 /* 7055 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 7056 * is on (half-synchronized state), then queue data for later 7057 * processing; else drop segment and return. 7058 */ 7059 if ((thflags & TH_ACK) == 0) { 7060 if (tp->t_flags & TF_NEEDSYN) { 7061 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7062 tiwin, thflags, nxt_pkt)); 7063 } else if (tp->t_flags & TF_ACKNOW) { 7064 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 7065 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 7066 return (ret_val); 7067 } else { 7068 ctf_do_drop(m, NULL); 7069 return (0); 7070 } 7071 } 7072 /* 7073 * Ack processing. 7074 */ 7075 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 7076 return (ret_val); 7077 } 7078 if (ourfinisacked) { 7079 tcp_twstart(tp); 7080 m_freem(m); 7081 return (1); 7082 } 7083 if (sbavail(&so->so_snd)) { 7084 if (rack_progress_timeout_check(tp)) { 7085 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 7086 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 7087 return (1); 7088 } 7089 } 7090 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7091 tiwin, thflags, nxt_pkt)); 7092 } 7093 7094 /* 7095 * Return value of 1, the TCB is unlocked and most 7096 * likely gone, return value of 0, the TCP is still 7097 * locked. 
7098 */ 7099 static int 7100 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 7101 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 7102 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 7103 { 7104 int32_t ret_val = 0; 7105 int32_t ourfinisacked = 0; 7106 7107 ctf_calc_rwin(so, tp); 7108 7109 if ((thflags & TH_RST) || 7110 (tp->t_fin_is_rst && (thflags & TH_FIN))) 7111 return (ctf_process_rst(m, th, so, tp)); 7112 /* 7113 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 7114 * synchronized state. 7115 */ 7116 if (thflags & TH_SYN) { 7117 ctf_challenge_ack(m, th, tp, &ret_val); 7118 return (ret_val); 7119 } 7120 /* 7121 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 7122 * it's less than ts_recent, drop it. 7123 */ 7124 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 7125 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 7126 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 7127 return (ret_val); 7128 } 7129 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 7130 return (ret_val); 7131 } 7132 /* 7133 * If new data are received on a connection after the user processes 7134 * are gone, then RST the other end. 7135 */ 7136 if ((so->so_state & SS_NOFDREF) && tlen) { 7137 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 7138 return (1); 7139 } 7140 /* 7141 * If last ACK falls within this segment's sequence numbers, record 7142 * its timestamp. NOTE: 1) That the test incorporates suggestions 7143 * from the latest proposal of the tcplw@cray.com list (Braden 7144 * 1993/04/26). 2) That updating only on newer timestamps interferes 7145 * with our earlier PAWS tests, so this check should be solely 7146 * predicated on the sequence space of this segment. 3) That we 7147 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 7148 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 7149 * SEG.Len, This modified check allows us to overcome RFC1323's 7150 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 7151 * p.869. In such cases, we can still calculate the RTT correctly 7152 * when RCV.NXT == Last.ACK.Sent. 7153 */ 7154 if ((to->to_flags & TOF_TS) != 0 && 7155 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 7156 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 7157 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 7158 tp->ts_recent_age = tcp_ts_getticks(); 7159 tp->ts_recent = to->to_tsval; 7160 } 7161 /* 7162 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 7163 * is on (half-synchronized state), then queue data for later 7164 * processing; else drop segment and return. 7165 */ 7166 if ((thflags & TH_ACK) == 0) { 7167 if (tp->t_flags & TF_NEEDSYN) { 7168 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7169 tiwin, thflags, nxt_pkt)); 7170 } else if (tp->t_flags & TF_ACKNOW) { 7171 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 7172 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++; 7173 return (ret_val); 7174 } else { 7175 ctf_do_drop(m, NULL); 7176 return (0); 7177 } 7178 } 7179 /* 7180 * case TCPS_LAST_ACK: Ack processing. 
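	 * (If the ACK covers our FIN, the close is complete: the tcb is
	 * torn down via tcp_close() and the segment is dropped, as the
	 * code just below does.)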
7181 */ 7182 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 7183 return (ret_val); 7184 } 7185 if (ourfinisacked) { 7186 tp = tcp_close(tp); 7187 ctf_do_drop(m, tp); 7188 return (1); 7189 } 7190 if (sbavail(&so->so_snd)) { 7191 if (rack_progress_timeout_check(tp)) { 7192 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 7193 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 7194 return (1); 7195 } 7196 } 7197 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 7198 tiwin, thflags, nxt_pkt)); 7199 } 7200 7201 7202 /* 7203 * Return value of 1, the TCB is unlocked and most 7204 * likely gone, return value of 0, the TCP is still 7205 * locked. 7206 */ 7207 static int 7208 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 7209 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 7210 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 7211 { 7212 int32_t ret_val = 0; 7213 int32_t ourfinisacked = 0; 7214 7215 ctf_calc_rwin(so, tp); 7216 7217 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 7218 if ((thflags & TH_RST) || 7219 (tp->t_fin_is_rst && (thflags & TH_FIN))) 7220 return (ctf_process_rst(m, th, so, tp)); 7221 /* 7222 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 7223 * synchronized state. 7224 */ 7225 if (thflags & TH_SYN) { 7226 ctf_challenge_ack(m, th, tp, &ret_val); 7227 return (ret_val); 7228 } 7229 /* 7230 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 7231 * it's less than ts_recent, drop it. 7232 */ 7233 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 7234 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 7235 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 7236 return (ret_val); 7237 } 7238 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 7239 return (ret_val); 7240 } 7241 /* 7242 * If new data are received on a connection after the user processes 7243 * are gone, then RST the other end. 7244 */ 7245 if ((so->so_state & SS_NOFDREF) && 7246 tlen) { 7247 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 7248 return (1); 7249 } 7250 /* 7251 * If last ACK falls within this segment's sequence numbers, record 7252 * its timestamp. NOTE: 1) That the test incorporates suggestions 7253 * from the latest proposal of the tcplw@cray.com list (Braden 7254 * 1993/04/26). 2) That updating only on newer timestamps interferes 7255 * with our earlier PAWS tests, so this check should be solely 7256 * predicated on the sequence space of this segment. 3) That we 7257 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 7258 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 7259 * SEG.Len, This modified check allows us to overcome RFC1323's 7260 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 7261 * p.869. In such cases, we can still calculate the RTT correctly 7262 * when RCV.NXT == Last.ACK.Sent. 7263 */ 7264 if ((to->to_flags & TOF_TS) != 0 && 7265 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 7266 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 7267 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 7268 tp->ts_recent_age = tcp_ts_getticks(); 7269 tp->ts_recent = to->to_tsval; 7270 } 7271 /* 7272 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 7273 * is on (half-synchronized state), then queue data for later 7274 * processing; else drop segment and return. 
	 */
	if ((thflags & TH_ACK) == 0) {
		if (tp->t_flags & TF_NEEDSYN) {
			return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
			    tiwin, thflags, nxt_pkt));
		} else if (tp->t_flags & TF_ACKNOW) {
			ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val);
			((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output++;
			return (ret_val);
		} else {
			ctf_do_drop(m, NULL);
			return (0);
		}
	}
	/*
	 * Ack processing.
	 */
	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
		return (ret_val);
	}
	if (sbavail(&so->so_snd)) {
		if (rack_progress_timeout_check(tp)) {
			tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
			ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
			return (1);
		}
	}
	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
	    tiwin, thflags, nxt_pkt));
}


static void inline
rack_clear_rate_sample(struct tcp_rack *rack)
{
	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
	rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
	rack->r_ctl.rack_rs.rs_rtt_tot = 0;
}

static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack)
{
	uint32_t tls_seg = 0;

#ifdef KERN_TLS
	if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
		tls_seg = ctf_get_opt_tls_size(rack->rc_inp->inp_socket, rack->rc_tp->snd_wnd);
		rack->r_ctl.rc_pace_min_segs = tls_seg;
	} else
#endif
		rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
	rack->r_ctl.rc_pace_max_segs = ctf_fixed_maxseg(tp) * rack->rc_pace_max_segs;
	if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES)
		rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
#ifdef KERN_TLS
	if (tls_seg != 0) {
		if (rack_hw_tls_max_seg > 1) {
			rack->r_ctl.rc_pace_max_segs /= tls_seg;
			if (rack_hw_tls_max_seg < rack->r_ctl.rc_pace_max_segs)
				rack->r_ctl.rc_pace_max_segs = rack_hw_tls_max_seg;
		} else {
			rack->r_ctl.rc_pace_max_segs = 1;
		}
		if (rack->r_ctl.rc_pace_max_segs == 0)
			rack->r_ctl.rc_pace_max_segs = 1;
		rack->r_ctl.rc_pace_max_segs *= tls_seg;
	}
#endif
	rack_log_type_hrdwtso(tp, rack, tls_seg, rack->rc_inp->inp_socket->so_snd.sb_flags, 0, 2);
}

static int
rack_init(struct tcpcb *tp)
{
	struct tcp_rack *rack = NULL;
	struct rack_sendmap *insret;

	tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT);
	if (tp->t_fb_ptr == NULL) {
		/*
		 * We need to allocate memory but can't. The INP and INP_INFO
		 * locks are held and they are recursive (this happens during setup).
So a 7358 * scheme to drop the locks fails :( 7359 * 7360 */ 7361 return (ENOMEM); 7362 } 7363 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 7364 7365 rack = (struct tcp_rack *)tp->t_fb_ptr; 7366 RB_INIT(&rack->r_ctl.rc_mtree); 7367 TAILQ_INIT(&rack->r_ctl.rc_free); 7368 TAILQ_INIT(&rack->r_ctl.rc_tmap); 7369 rack->rc_tp = tp; 7370 if (tp->t_inpcb) { 7371 rack->rc_inp = tp->t_inpcb; 7372 } 7373 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 7374 /* Probably not needed but lets be sure */ 7375 rack_clear_rate_sample(rack); 7376 rack->r_cpu = 0; 7377 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 7378 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 7379 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 7380 rack->rc_pace_reduce = rack_slot_reduction; 7381 if (use_rack_cheat) 7382 rack->use_rack_cheat = 1; 7383 if (V_tcp_delack_enabled) 7384 tp->t_delayed_ack = 1; 7385 else 7386 tp->t_delayed_ack = 0; 7387 rack->rc_pace_max_segs = rack_hptsi_segments; 7388 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 7389 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 7390 rack->r_ctl.rc_prop_reduce = rack_use_proportional_reduce; 7391 rack->r_enforce_min_pace = rack_min_pace_time; 7392 rack->r_ctl.rc_prop_rate = rack_proportional_rate; 7393 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 7394 rack->r_ctl.rc_early_recovery = rack_early_recovery; 7395 rack->rc_always_pace = rack_pace_every_seg; 7396 rack_set_pace_segments(tp, rack); 7397 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 7398 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 7399 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 7400 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 7401 rack->r_ctl.rc_min_to = rack_min_to; 7402 rack->rack_per_of_gp = rack_per_of_gp; 7403 microuptime(&rack->r_ctl.rc_last_ack); 7404 rack->r_ctl.rc_last_time_decay = rack->r_ctl.rc_last_ack; 7405 rack->r_ctl.rc_tlp_rxt_last_time = tcp_ts_getticks(); 7406 /* Do we force on detection? */ 7407 if (tcp_force_detection) 7408 rack->do_detection = 1; 7409 else 7410 rack->do_detection = 0; 7411 if (tp->snd_una != tp->snd_max) { 7412 /* Create a send map for the current outstanding data */ 7413 struct rack_sendmap *rsm; 7414 7415 rsm = rack_alloc(rack); 7416 if (rsm == NULL) { 7417 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 7418 tp->t_fb_ptr = NULL; 7419 return (ENOMEM); 7420 } 7421 rsm->r_flags = RACK_OVERMAX; 7422 rsm->r_tim_lastsent[0] = rack->r_ctl.rc_tlp_rxt_last_time; 7423 rsm->r_rtr_cnt = 1; 7424 rsm->r_rtr_bytes = 0; 7425 rsm->r_start = tp->snd_una; 7426 rsm->r_end = tp->snd_max; 7427 rsm->r_dupack = 0; 7428 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7429 #ifdef INVARIANTS 7430 if (insret != NULL) { 7431 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 7432 insret, rack, rsm); 7433 } 7434 #endif 7435 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7436 rsm->r_in_tmap = 1; 7437 } 7438 rack_stop_all_timers(tp); 7439 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0); 7440 return (0); 7441 } 7442 7443 static int 7444 rack_handoff_ok(struct tcpcb *tp) 7445 { 7446 if ((tp->t_state == TCPS_CLOSED) || 7447 (tp->t_state == TCPS_LISTEN)) { 7448 /* Sure no problem though it may not stick */ 7449 return (0); 7450 } 7451 if ((tp->t_state == TCPS_SYN_SENT) || 7452 (tp->t_state == TCPS_SYN_RECEIVED)) { 7453 /* 7454 * We really don't know you have to get to ESTAB or beyond 7455 * to tell. 
7456 */ 7457 return (EAGAIN); 7458 } 7459 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 7460 return (0); 7461 } 7462 /* 7463 * If we reach here we don't do SACK on this connection so we can 7464 * never do rack. 7465 */ 7466 return (EINVAL); 7467 } 7468 7469 static void 7470 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 7471 { 7472 if (tp->t_fb_ptr) { 7473 struct tcp_rack *rack; 7474 struct rack_sendmap *rsm, *nrsm, *rm; 7475 if (tp->t_inpcb) { 7476 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 7477 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 7478 } 7479 rack = (struct tcp_rack *)tp->t_fb_ptr; 7480 #ifdef TCP_BLACKBOX 7481 tcp_log_flowend(tp); 7482 #endif 7483 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 7484 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7485 #ifdef INVARIANTS 7486 if (rm != rsm) { 7487 panic("At fini, rack:%p rsm:%p rm:%p", 7488 rack, rsm, rm); 7489 } 7490 #endif 7491 uma_zfree(rack_zone, rsm); 7492 } 7493 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 7494 while (rsm) { 7495 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 7496 uma_zfree(rack_zone, rsm); 7497 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 7498 } 7499 rack->rc_free_cnt = 0; 7500 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 7501 tp->t_fb_ptr = NULL; 7502 } 7503 /* Make sure snd_nxt is correctly set */ 7504 tp->snd_nxt = tp->snd_max; 7505 } 7506 7507 7508 static void 7509 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 7510 { 7511 switch (tp->t_state) { 7512 case TCPS_SYN_SENT: 7513 rack->r_state = TCPS_SYN_SENT; 7514 rack->r_substate = rack_do_syn_sent; 7515 break; 7516 case TCPS_SYN_RECEIVED: 7517 rack->r_state = TCPS_SYN_RECEIVED; 7518 rack->r_substate = rack_do_syn_recv; 7519 break; 7520 case TCPS_ESTABLISHED: 7521 rack_set_pace_segments(tp, rack); 7522 rack->r_state = TCPS_ESTABLISHED; 7523 rack->r_substate = rack_do_established; 7524 break; 7525 case TCPS_CLOSE_WAIT: 7526 rack->r_state = TCPS_CLOSE_WAIT; 7527 rack->r_substate = rack_do_close_wait; 7528 break; 7529 case TCPS_FIN_WAIT_1: 7530 rack->r_state = TCPS_FIN_WAIT_1; 7531 rack->r_substate = rack_do_fin_wait_1; 7532 break; 7533 case TCPS_CLOSING: 7534 rack->r_state = TCPS_CLOSING; 7535 rack->r_substate = rack_do_closing; 7536 break; 7537 case TCPS_LAST_ACK: 7538 rack->r_state = TCPS_LAST_ACK; 7539 rack->r_substate = rack_do_lastack; 7540 break; 7541 case TCPS_FIN_WAIT_2: 7542 rack->r_state = TCPS_FIN_WAIT_2; 7543 rack->r_substate = rack_do_fin_wait_2; 7544 break; 7545 case TCPS_LISTEN: 7546 case TCPS_CLOSED: 7547 case TCPS_TIME_WAIT: 7548 default: 7549 break; 7550 }; 7551 } 7552 7553 7554 static void 7555 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 7556 { 7557 /* 7558 * We received an ack, and then did not 7559 * call send or were bounced out due to the 7560 * hpts was running. Now a timer is up as well, is 7561 * it the right timer? 7562 */ 7563 struct rack_sendmap *rsm; 7564 int tmr_up; 7565 7566 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7567 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 7568 return; 7569 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7570 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 7571 (tmr_up == PACE_TMR_RXT)) { 7572 /* Should be an RXT */ 7573 return; 7574 } 7575 if (rsm == NULL) { 7576 /* Nothing outstanding? 
 */
		if (tp->t_flags & TF_DELACK) {
			if (tmr_up == PACE_TMR_DELACK)
				/* We are supposed to have delayed ack up and we do */
				return;
		} else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
			/*
			 * If we hit ENOBUFS then we would expect the possibility
			 * of nothing outstanding and the RXT up (and the hptsi timer).
			 */
			return;
		} else if (((tcp_always_keepalive ||
		    rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) &&
		    (tp->t_state <= TCPS_CLOSING)) &&
		    (tmr_up == PACE_TMR_KEEP) &&
		    (tp->snd_max == tp->snd_una)) {
			/* We should have keep alive up and we do */
			return;
		}
	}
	if (SEQ_GT(tp->snd_max, tp->snd_una) &&
	    ((tmr_up == PACE_TMR_TLP) ||
	    (tmr_up == PACE_TMR_RACK) ||
	    (tmr_up == PACE_TMR_RXT))) {
		/*
		 * Either a Rack, TLP or RXT is fine if we
		 * have outstanding data.
		 */
		return;
	} else if (tmr_up == PACE_TMR_DELACK) {
		/*
		 * If the delayed ack was going to go off
		 * before the rtx/tlp/rack timer were going to
		 * expire, then that would be the timer in control.
		 * Note we don't check the time here, trusting that the
		 * code is correct.
		 */
		return;
	}
	/*
	 * Ok, the timer originally started is not what we want now.
	 * We will force the hpts to be stopped if any, and restart
	 * with the slot set to what was in the saved slot.
	 */
	rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
	rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0);
}

static int
rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
    int32_t nxt_pkt, struct timeval *tv)
{
	int32_t thflags, retval, did_out = 0;
	int32_t way_out = 0;
	uint32_t cts;
	uint32_t tiwin;
	struct tcpopt to;
	struct tcp_rack *rack;
	struct rack_sendmap *rsm;
	int32_t prev_state = 0;

	if (m->m_flags & M_TSTMP_LRO) {
		tv->tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000;
		tv->tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000;
	}
	cts = tcp_tv_to_mssectick(tv);
	rack = (struct tcp_rack *)tp->t_fb_ptr;

	kern_prefetch(rack, &prev_state);
	prev_state = 0;
	thflags = th->th_flags;

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
		TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
		    tlen, &log, true, &tv);
	}
	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		way_out = 4;
		retval = 0;
		goto done_with_input;
	}
	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state,
	 * check SEG.ACK first as described on page 66 of RFC 793, section 3.9.
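	 * (The ACK is acceptable only when ISS < SEG.ACK <= SND.MAX;
	 * anything outside that range is answered with a reset below.)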
7678 */ 7679 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 7680 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 7681 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 7682 return(1); 7683 } 7684 /* 7685 * Segment received on connection. Reset idle time and keep-alive 7686 * timer. XXX: This should be done after segment validation to 7687 * ignore broken/spoofed segs. 7688 */ 7689 if (tp->t_idle_reduce && 7690 (tp->snd_max == tp->snd_una) && 7691 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 7692 counter_u64_add(rack_input_idle_reduces, 1); 7693 rack_cc_after_idle(tp); 7694 } 7695 tp->t_rcvtime = ticks; 7696 7697 /* 7698 * Unscale the window into a 32-bit value. For the SYN_SENT state 7699 * the scale is zero. 7700 */ 7701 tiwin = th->th_win << tp->snd_scale; 7702 #ifdef NETFLIX_STATS 7703 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 7704 #endif 7705 if (tiwin > rack->r_ctl.rc_high_rwnd) 7706 rack->r_ctl.rc_high_rwnd = tiwin; 7707 /* 7708 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 7709 * this to occur after we've validated the segment. 7710 */ 7711 if (tp->t_flags & TF_ECN_PERMIT) { 7712 if (thflags & TH_CWR) 7713 tp->t_flags &= ~TF_ECN_SND_ECE; 7714 switch (iptos & IPTOS_ECN_MASK) { 7715 case IPTOS_ECN_CE: 7716 tp->t_flags |= TF_ECN_SND_ECE; 7717 TCPSTAT_INC(tcps_ecn_ce); 7718 break; 7719 case IPTOS_ECN_ECT0: 7720 TCPSTAT_INC(tcps_ecn_ect0); 7721 break; 7722 case IPTOS_ECN_ECT1: 7723 TCPSTAT_INC(tcps_ecn_ect1); 7724 break; 7725 } 7726 /* Congestion experienced. */ 7727 if (thflags & TH_ECE) { 7728 rack_cong_signal(tp, th, CC_ECN); 7729 } 7730 } 7731 /* 7732 * Parse options on any incoming segment. 7733 */ 7734 tcp_dooptions(&to, (u_char *)(th + 1), 7735 (th->th_off << 2) - sizeof(struct tcphdr), 7736 (thflags & TH_SYN) ? TO_SYN : 0); 7737 7738 /* 7739 * If echoed timestamp is later than the current time, fall back to 7740 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 7741 * were used when this connection was established. 7742 */ 7743 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 7744 to.to_tsecr -= tp->ts_offset; 7745 if (TSTMP_GT(to.to_tsecr, cts)) 7746 to.to_tsecr = 0; 7747 } 7748 /* 7749 * If its the first time in we need to take care of options and 7750 * verify we can do SACK for rack! 7751 */ 7752 if (rack->r_state == 0) { 7753 /* Should be init'd by rack_init() */ 7754 KASSERT(rack->rc_inp != NULL, 7755 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 7756 if (rack->rc_inp == NULL) { 7757 rack->rc_inp = tp->t_inpcb; 7758 } 7759 7760 /* 7761 * Process options only when we get SYN/ACK back. The SYN 7762 * case for incoming connections is handled in tcp_syncache. 7763 * According to RFC1323 the window field in a SYN (i.e., a 7764 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 7765 * this is traditional behavior, may need to be cleaned up. 7766 */ 7767 rack->r_cpu = inp_to_cpuid(tp->t_inpcb); 7768 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 7769 if ((to.to_flags & TOF_SCALE) && 7770 (tp->t_flags & TF_REQ_SCALE)) { 7771 tp->t_flags |= TF_RCVD_SCALE; 7772 tp->snd_scale = to.to_wscale; 7773 } 7774 /* 7775 * Initial send window. It will be updated with the 7776 * next incoming segment to the scaled value. 
7777 */ 7778 tp->snd_wnd = th->th_win; 7779 if (to.to_flags & TOF_TS) { 7780 tp->t_flags |= TF_RCVD_TSTMP; 7781 tp->ts_recent = to.to_tsval; 7782 tp->ts_recent_age = cts; 7783 } 7784 if (to.to_flags & TOF_MSS) 7785 tcp_mss(tp, to.to_mss); 7786 if ((tp->t_flags & TF_SACK_PERMIT) && 7787 (to.to_flags & TOF_SACKPERM) == 0) 7788 tp->t_flags &= ~TF_SACK_PERMIT; 7789 if (IS_FASTOPEN(tp->t_flags)) { 7790 if (to.to_flags & TOF_FASTOPEN) { 7791 uint16_t mss; 7792 7793 if (to.to_flags & TOF_MSS) 7794 mss = to.to_mss; 7795 else 7796 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 7797 mss = TCP6_MSS; 7798 else 7799 mss = TCP_MSS; 7800 tcp_fastopen_update_cache(tp, mss, 7801 to.to_tfo_len, to.to_tfo_cookie); 7802 } else 7803 tcp_fastopen_disable_path(tp); 7804 } 7805 } 7806 /* 7807 * At this point we are at the initial call. Here we decide 7808 * if we are doing RACK or not. We do this by seeing if 7809 * TF_SACK_PERMIT is set, if not rack is *not* possible and 7810 * we switch to the default code. 7811 */ 7812 if ((tp->t_flags & TF_SACK_PERMIT) == 0) { 7813 tcp_switch_back_to_default(tp); 7814 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 7815 tlen, iptos); 7816 return (1); 7817 } 7818 /* Set the flag */ 7819 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 7820 tcp_set_hpts(tp->t_inpcb); 7821 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 7822 } 7823 /* 7824 * This is the one exception case where we set the rack state 7825 * always. All other times (timers etc) we must have a rack-state 7826 * set (so we assure we have done the checks above for SACK). 7827 */ 7828 memcpy(&rack->r_ctl.rc_last_ack, tv, sizeof(struct timeval)); 7829 rack->r_ctl.rc_rcvtime = cts; 7830 if (rack->r_state != tp->t_state) 7831 rack_set_state(tp, rack); 7832 if (SEQ_GT(th->th_ack, tp->snd_una) && 7833 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 7834 kern_prefetch(rsm, &prev_state); 7835 prev_state = rack->r_state; 7836 rack->r_ctl.rc_tlp_send_cnt = 0; 7837 rack_clear_rate_sample(rack); 7838 retval = (*rack->r_substate) (m, th, so, 7839 tp, &to, drop_hdrlen, 7840 tlen, tiwin, thflags, nxt_pkt); 7841 #ifdef INVARIANTS 7842 if ((retval == 0) && 7843 (tp->t_inpcb == NULL)) { 7844 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 7845 retval, tp, prev_state); 7846 } 7847 #endif 7848 if (retval == 0) { 7849 /* 7850 * If retval is 1 the tcb is unlocked and most likely the tp 7851 * is gone. 7852 */ 7853 INP_WLOCK_ASSERT(tp->t_inpcb); 7854 if (rack->set_pacing_done_a_iw == 0) { 7855 /* How much has been acked? */ 7856 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 7857 /* We have enough to set in the pacing segment size */ 7858 rack->set_pacing_done_a_iw = 1; 7859 rack_set_pace_segments(tp, rack); 7860 } 7861 } 7862 tcp_rack_xmit_timer_commit(rack, tp); 7863 if ((nxt_pkt == 0) || (IN_RECOVERY(tp->t_flags))) { 7864 if (rack->r_wanted_output != 0) { 7865 did_out = 1; 7866 (void)tp->t_fb->tfb_tcp_output(tp); 7867 } 7868 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 7869 } 7870 if ((nxt_pkt == 0) && 7871 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 7872 (SEQ_GT(tp->snd_max, tp->snd_una) || 7873 (tp->t_flags & TF_DELACK) || 7874 ((tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 7875 (tp->t_state <= TCPS_CLOSING)))) { 7876 /* We could not send (probably in the hpts but stopped the timer earlier)? 
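			 * If so, either output is already scheduled on the hpts or a
			 * timer is (re)armed below, so the connection cannot stall with
			 * data or a pending ACK outstanding and no event pending.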
*/ 7877 if ((tp->snd_max == tp->snd_una) && 7878 ((tp->t_flags & TF_DELACK) == 0) && 7879 (rack->rc_inp->inp_in_hpts) && 7880 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 7881 /* keep alive not needed if we are hptsi output yet */ 7882 ; 7883 } else { 7884 if (rack->rc_inp->inp_in_hpts) { 7885 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7886 counter_u64_add(rack_per_timer_hole, 1); 7887 } 7888 rack_start_hpts_timer(rack, tp, tcp_ts_getticks(), 0, 0, 0); 7889 } 7890 way_out = 1; 7891 } else if (nxt_pkt == 0) { 7892 /* Do we have the correct timer running? */ 7893 rack_timer_audit(tp, rack, &so->so_snd); 7894 way_out = 2; 7895 } 7896 done_with_input: 7897 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out); 7898 if (did_out) 7899 rack->r_wanted_output = 0; 7900 #ifdef INVARIANTS 7901 if (tp->t_inpcb == NULL) { 7902 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 7903 did_out, 7904 retval, tp, prev_state); 7905 } 7906 #endif 7907 } 7908 return (retval); 7909 } 7910 7911 void 7912 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 7913 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 7914 { 7915 struct timeval tv; 7916 7917 /* First lets see if we have old packets */ 7918 if (tp->t_in_pkt) { 7919 if (ctf_do_queued_segments(so, tp, 1)) { 7920 m_freem(m); 7921 return; 7922 } 7923 } 7924 if (m->m_flags & M_TSTMP_LRO) { 7925 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 7926 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 7927 } else { 7928 /* Should not be should we kassert instead? */ 7929 tcp_get_usecs(&tv); 7930 } 7931 if(rack_do_segment_nounlock(m, th, so, tp, 7932 drop_hdrlen, tlen, iptos, 0, &tv) == 0) 7933 INP_WUNLOCK(tp->t_inpcb); 7934 } 7935 7936 struct rack_sendmap * 7937 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 7938 { 7939 struct rack_sendmap *rsm = NULL; 7940 int32_t idx; 7941 uint32_t srtt = 0, thresh = 0, ts_low = 0; 7942 7943 /* Return the next guy to be re-transmitted */ 7944 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 7945 return (NULL); 7946 } 7947 if (tp->t_flags & TF_SENTFIN) { 7948 /* retran the end FIN? */ 7949 return (NULL); 7950 } 7951 /* ok lets look at this one */ 7952 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 7953 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 7954 goto check_it; 7955 } 7956 rsm = rack_find_lowest_rsm(rack); 7957 if (rsm == NULL) { 7958 return (NULL); 7959 } 7960 check_it: 7961 if (rsm->r_flags & RACK_ACKED) { 7962 return (NULL); 7963 } 7964 if ((rsm->r_flags & RACK_SACK_PASSED) == 0) { 7965 /* Its not yet ready */ 7966 return (NULL); 7967 } 7968 srtt = rack_grab_rtt(tp, rack); 7969 idx = rsm->r_rtr_cnt - 1; 7970 ts_low = rsm->r_tim_lastsent[idx]; 7971 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 7972 if ((tsused == ts_low) || 7973 (TSTMP_LT(tsused, ts_low))) { 7974 /* No time since sending */ 7975 return (NULL); 7976 } 7977 if ((tsused - ts_low) < thresh) { 7978 /* It has not been long enough yet */ 7979 return (NULL); 7980 } 7981 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 7982 ((rsm->r_flags & RACK_SACK_PASSED) && 7983 (rack->sack_attack_disable == 0))) { 7984 /* 7985 * We have passed the dup-ack threshold <or> 7986 * a SACK has indicated this is missing. 7987 * Note that if you are a declared attacker 7988 * it is only the dup-ack threshold that 7989 * will cause retransmits. 
		 */
		/* log retransmit reason */
		rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
		return (rsm);
	}
	return (NULL);
}

static int32_t
rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len)
{
	int32_t slot = 0;

	if ((rack->rack_per_of_gp == 0) ||
	    (rack->rc_always_pace == 0)) {
		/*
		 * We use the most optimistic possible cwnd/srtt for
		 * sending calculations. This will make our
		 * calculation anticipate getting more through
		 * quicker than possible. But that's ok, we don't want
		 * the peer to have a gap in data sending.
		 */
		uint32_t srtt, cwnd, tr_perms = 0;

old_method:
		if (rack->r_ctl.rc_rack_min_rtt)
			srtt = rack->r_ctl.rc_rack_min_rtt;
		else
			srtt = TICKS_2_MSEC((tp->t_srtt >> TCP_RTT_SHIFT));
		if (rack->r_ctl.rc_rack_largest_cwnd)
			cwnd = rack->r_ctl.rc_rack_largest_cwnd;
		else
			cwnd = tp->snd_cwnd;
		tr_perms = cwnd / srtt;
		if (tr_perms == 0) {
			tr_perms = ctf_fixed_maxseg(tp);
		}
		/*
		 * Calculate how long this will take to drain. If the
		 * calculation comes out to zero, that's ok; we will use
		 * send_a_lot to possibly spin around for more, increasing
		 * tot_len_this_send to the point that it's going to require
		 * a pace, or we hit the cwnd, in which case we are just
		 * waiting for an ACK.
		 */
		slot = len / tr_perms;
		/* Now do we reduce the time so we don't run dry? */
		if (slot && rack->rc_pace_reduce) {
			int32_t reduce;

			reduce = (slot / rack->rc_pace_reduce);
			if (reduce < slot) {
				slot -= reduce;
			} else
				slot = 0;
		}
	} else {
		int cnt;
		uint64_t bw_est, bw_raise, res, lentim;

		bw_est = 0;
		for (cnt = 0; cnt < RACK_GP_HIST; cnt++) {
			if ((rack->r_ctl.rc_gp_hist_filled == 0) &&
			    (rack->r_ctl.rc_gp_history[cnt] == 0))
				break;
			bw_est += rack->r_ctl.rc_gp_history[cnt];
		}
		if (bw_est == 0) {
			/*
			 * No way yet to make a b/w estimate
			 * (no goodput est yet).
			 */
			goto old_method;
		}
		/* Convert to bytes per second */
		bw_est *= MSEC_IN_SECOND;
		/*
		 * Now ratchet it up by our percentage. Note
		 * that the minimum you can do is 1 which would
		 * get you 101% of the average last N goodput estimates.
		 * The max you can do is 256 which would yield you
		 * 356% of the last N goodput estimates.
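		 *
		 * As a rough illustration of the slot computed just below
		 * (example numbers only, and assuming MSEC_IN_SECOND is 1000
		 * so that the slot is in milliseconds): with a final bw_est
		 * of 1,460,000 bytes/sec and len of 14,600 bytes, lentim is
		 * 14,600 * 1000 and the resulting pacing slot is about 10 ms.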
8073 */ 8074 bw_raise = bw_est * (uint64_t)rack->rack_per_of_gp; 8075 bw_est += bw_raise; 8076 /* average by the number we added */ 8077 bw_est /= cnt; 8078 /* Now calculate a rate based on this b/w */ 8079 lentim = (uint64_t) len * (uint64_t)MSEC_IN_SECOND; 8080 res = lentim / bw_est; 8081 slot = (uint32_t)res; 8082 } 8083 if (rack->r_enforce_min_pace && 8084 (slot == 0)) { 8085 /* We are enforcing a minimum pace time of 1ms */ 8086 slot = rack->r_enforce_min_pace; 8087 } 8088 if (slot) 8089 counter_u64_add(rack_calc_nonzero, 1); 8090 else 8091 counter_u64_add(rack_calc_zero, 1); 8092 return (slot); 8093 } 8094 8095 static int 8096 rack_output(struct tcpcb *tp) 8097 { 8098 struct socket *so; 8099 uint32_t recwin, sendwin; 8100 uint32_t sb_offset; 8101 int32_t len, flags, error = 0; 8102 struct mbuf *m; 8103 struct mbuf *mb; 8104 uint32_t if_hw_tsomaxsegcount = 0; 8105 uint32_t if_hw_tsomaxsegsize = 0; 8106 int32_t maxseg; 8107 long tot_len_this_send = 0; 8108 struct ip *ip = NULL; 8109 #ifdef TCPDEBUG 8110 struct ipovly *ipov = NULL; 8111 #endif 8112 struct udphdr *udp = NULL; 8113 struct tcp_rack *rack; 8114 struct tcphdr *th; 8115 uint8_t pass = 0; 8116 uint8_t wanted_cookie = 0; 8117 u_char opt[TCP_MAXOLEN]; 8118 unsigned ipoptlen, optlen, hdrlen, ulen=0; 8119 uint32_t rack_seq; 8120 8121 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 8122 unsigned ipsec_optlen = 0; 8123 8124 #endif 8125 int32_t idle, sendalot; 8126 int32_t sub_from_prr = 0; 8127 volatile int32_t sack_rxmit; 8128 struct rack_sendmap *rsm = NULL; 8129 int32_t tso, mtu; 8130 struct tcpopt to; 8131 int32_t slot = 0; 8132 int32_t sup_rack = 0; 8133 uint32_t cts; 8134 uint8_t hpts_calling, new_data_tlp = 0, doing_tlp = 0; 8135 int32_t do_a_prefetch; 8136 int32_t prefetch_rsm = 0; 8137 int force_tso = 0; 8138 int32_t orig_len; 8139 int32_t prefetch_so_done = 0; 8140 struct tcp_log_buffer *lgb = NULL; 8141 struct inpcb *inp; 8142 struct sockbuf *sb; 8143 #ifdef INET6 8144 struct ip6_hdr *ip6 = NULL; 8145 int32_t isipv6; 8146 #endif 8147 uint8_t filled_all = 0; 8148 bool hw_tls = false; 8149 8150 /* setup and take the cache hits here */ 8151 rack = (struct tcp_rack *)tp->t_fb_ptr; 8152 inp = rack->rc_inp; 8153 so = inp->inp_socket; 8154 sb = &so->so_snd; 8155 kern_prefetch(sb, &do_a_prefetch); 8156 do_a_prefetch = 1; 8157 8158 #ifdef KERN_TLS 8159 hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0; 8160 #endif 8161 8162 INP_WLOCK_ASSERT(inp); 8163 #ifdef TCP_OFFLOAD 8164 if (tp->t_flags & TF_TOE) 8165 return (tcp_offload_output(tp)); 8166 #endif 8167 maxseg = ctf_fixed_maxseg(tp); 8168 /* 8169 * For TFO connections in SYN_RECEIVED, only allow the initial 8170 * SYN|ACK and those sent by the retransmit timer. 8171 */ 8172 if (IS_FASTOPEN(tp->t_flags) && 8173 (tp->t_state == TCPS_SYN_RECEIVED) && 8174 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 8175 (rack->r_ctl.rc_resend == NULL)) /* not a retransmit */ 8176 return (0); 8177 #ifdef INET6 8178 if (rack->r_state) { 8179 /* Use the cache line loaded if possible */ 8180 isipv6 = rack->r_is_v6; 8181 } else { 8182 isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 8183 } 8184 #endif 8185 cts = tcp_ts_getticks(); 8186 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 8187 inp->inp_in_hpts) { 8188 /* 8189 * We are on the hpts for some timer but not hptsi output. 8190 * Remove from the hpts unconditionally. 8191 */ 8192 rack_timer_cancel(tp, rack, cts, __LINE__); 8193 } 8194 /* Mark that we have called rack_output(). 
*/ 8195 if ((rack->r_timer_override) || 8196 (tp->t_flags & TF_FORCEDATA) || 8197 (tp->t_state < TCPS_ESTABLISHED)) { 8198 if (tp->t_inpcb->inp_in_hpts) 8199 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 8200 } else if (tp->t_inpcb->inp_in_hpts) { 8201 /* 8202 * On the hpts you can't pass even if ACKNOW is on, we will 8203 * when the hpts fires. 8204 */ 8205 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 8206 return (0); 8207 } 8208 hpts_calling = inp->inp_hpts_calls; 8209 inp->inp_hpts_calls = 0; 8210 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 8211 if (rack_process_timers(tp, rack, cts, hpts_calling)) { 8212 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 8213 return (0); 8214 } 8215 } 8216 rack->r_wanted_output = 0; 8217 rack->r_timer_override = 0; 8218 /* 8219 * For TFO connections in SYN_SENT or SYN_RECEIVED, 8220 * only allow the initial SYN or SYN|ACK and those sent 8221 * by the retransmit timer. 8222 */ 8223 if (IS_FASTOPEN(tp->t_flags) && 8224 ((tp->t_state == TCPS_SYN_RECEIVED) || 8225 (tp->t_state == TCPS_SYN_SENT)) && 8226 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 8227 (tp->t_rxtshift == 0)) /* not a retransmit */ 8228 return (0); 8229 /* 8230 * Determine length of data that should be transmitted, and flags 8231 * that will be used. If there is some data or critical controls 8232 * (SYN, RST) to send, then transmit; otherwise, investigate 8233 * further. 8234 */ 8235 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 8236 if (tp->t_idle_reduce) { 8237 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 8238 rack_cc_after_idle(tp); 8239 } 8240 tp->t_flags &= ~TF_LASTIDLE; 8241 if (idle) { 8242 if (tp->t_flags & TF_MORETOCOME) { 8243 tp->t_flags |= TF_LASTIDLE; 8244 idle = 0; 8245 } 8246 } 8247 again: 8248 /* 8249 * If we've recently taken a timeout, snd_max will be greater than 8250 * snd_nxt. There may be SACK information that allows us to avoid 8251 * resending already delivered data. Adjust snd_nxt accordingly. 8252 */ 8253 sendalot = 0; 8254 cts = tcp_ts_getticks(); 8255 tso = 0; 8256 mtu = 0; 8257 sb_offset = tp->snd_max - tp->snd_una; 8258 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 8259 8260 flags = tcp_outflags[tp->t_state]; 8261 while (rack->rc_free_cnt < rack_free_cache) { 8262 rsm = rack_alloc(rack); 8263 if (rsm == NULL) { 8264 if (inp->inp_hpts_calls) 8265 /* Retry in a ms */ 8266 slot = 1; 8267 goto just_return_nolock; 8268 } 8269 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 8270 rack->rc_free_cnt++; 8271 rsm = NULL; 8272 } 8273 if (inp->inp_hpts_calls) 8274 inp->inp_hpts_calls = 0; 8275 sack_rxmit = 0; 8276 len = 0; 8277 rsm = NULL; 8278 if (flags & TH_RST) { 8279 SOCKBUF_LOCK(sb); 8280 goto send; 8281 } 8282 if (rack->r_ctl.rc_tlpsend) { 8283 /* Tail loss probe */ 8284 long cwin; 8285 long tlen; 8286 8287 doing_tlp = 1; 8288 /* 8289 * Check if we can do a TLP with a RACK'd packet 8290 * this can happen if we are not doing the rack 8291 * cheat and we skipped to a TLP and it 8292 * went off. 
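		 * In that case we prefer the RACK retransmit candidate
		 * returned by tcp_rack_output(); only if there is none do we
		 * fall back to the segment stashed in rc_tlpsend.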
8293 */ 8294 rsm = tcp_rack_output(tp, rack, cts); 8295 if (rsm == NULL) 8296 rsm = rack->r_ctl.rc_tlpsend; 8297 rack->r_ctl.rc_tlpsend = NULL; 8298 sack_rxmit = 1; 8299 tlen = rsm->r_end - rsm->r_start; 8300 if (tlen > ctf_fixed_maxseg(tp)) 8301 tlen = ctf_fixed_maxseg(tp); 8302 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 8303 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 8304 __func__, __LINE__, 8305 rsm->r_start, tp->snd_una, tp, rack, rsm)); 8306 sb_offset = rsm->r_start - tp->snd_una; 8307 cwin = min(tp->snd_wnd, tlen); 8308 len = cwin; 8309 } else if (rack->r_ctl.rc_resend) { 8310 /* Retransmit timer */ 8311 rsm = rack->r_ctl.rc_resend; 8312 rack->r_ctl.rc_resend = NULL; 8313 len = rsm->r_end - rsm->r_start; 8314 sack_rxmit = 1; 8315 sendalot = 0; 8316 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 8317 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 8318 __func__, __LINE__, 8319 rsm->r_start, tp->snd_una, tp, rack, rsm)); 8320 sb_offset = rsm->r_start - tp->snd_una; 8321 if (len >= ctf_fixed_maxseg(tp)) { 8322 len = ctf_fixed_maxseg(tp); 8323 } 8324 } else if ((rack->rc_in_persist == 0) && 8325 ((rsm = tcp_rack_output(tp, rack, cts)) != NULL)) { 8326 int maxseg; 8327 8328 maxseg = ctf_fixed_maxseg(tp); 8329 if ((!IN_RECOVERY(tp->t_flags)) && 8330 ((tp->t_flags & (TF_WASFRECOVERY | TF_WASCRECOVERY)) == 0)) { 8331 /* Enter recovery if not induced by a time-out */ 8332 rack->r_ctl.rc_rsm_start = rsm->r_start; 8333 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 8334 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 8335 rack_cong_signal(tp, NULL, CC_NDUPACK); 8336 /* 8337 * When we enter recovery we need to assure we send 8338 * one packet. 8339 */ 8340 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 8341 rack_log_to_prr(rack, 13); 8342 } 8343 #ifdef INVARIANTS 8344 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 8345 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 8346 tp, rack, rsm, rsm->r_start, tp->snd_una); 8347 } 8348 #endif 8349 len = rsm->r_end - rsm->r_start; 8350 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 8351 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 8352 __func__, __LINE__, 8353 rsm->r_start, tp->snd_una, tp, rack, rsm)); 8354 sb_offset = rsm->r_start - tp->snd_una; 8355 /* Can we send it within the PRR boundary? */ 8356 if ((rack->use_rack_cheat == 0) && (len > rack->r_ctl.rc_prr_sndcnt)) { 8357 /* It does not fit */ 8358 if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) > len) && 8359 (rack->r_ctl.rc_prr_sndcnt < maxseg)) { 8360 /* 8361 * prr is less than a segment, we 8362 * have more acks due in besides 8363 * what we need to resend. Lets not send 8364 * to avoid sending small pieces of 8365 * what we need to retransmit. 8366 */ 8367 len = 0; 8368 goto just_return_nolock; 8369 } 8370 len = rack->r_ctl.rc_prr_sndcnt; 8371 } 8372 sendalot = 0; 8373 if (len >= maxseg) { 8374 len = maxseg; 8375 } 8376 if (len > 0) { 8377 sub_from_prr = 1; 8378 sack_rxmit = 1; 8379 TCPSTAT_INC(tcps_sack_rexmits); 8380 TCPSTAT_ADD(tcps_sack_rexmit_bytes, 8381 min(len, ctf_fixed_maxseg(tp))); 8382 counter_u64_add(rack_rtm_prr_retran, 1); 8383 } 8384 } 8385 /* 8386 * Enforce a connection sendmap count limit if set 8387 * as long as we are not retransmiting. 
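	 * (This bounds how many rack_sendmap entries a single connection can
	 * hold; once the limit is reached we simply refrain from sending new
	 * data until entries are freed, as the code below does.)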
8388 */ 8389 if ((rsm == NULL) && 8390 (rack->do_detection == 0) && 8391 (rack_tcp_map_entries_limit > 0) && 8392 (rack->r_ctl.rc_num_maps_alloced >= rack_tcp_map_entries_limit)) { 8393 counter_u64_add(rack_to_alloc_limited, 1); 8394 if (!rack->alloc_limit_reported) { 8395 rack->alloc_limit_reported = 1; 8396 counter_u64_add(rack_alloc_limited_conns, 1); 8397 } 8398 goto just_return_nolock; 8399 } 8400 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 8401 /* we are retransmitting the fin */ 8402 len--; 8403 if (len) { 8404 /* 8405 * When retransmitting data do *not* include the 8406 * FIN. This could happen from a TLP probe. 8407 */ 8408 flags &= ~TH_FIN; 8409 } 8410 } 8411 #ifdef INVARIANTS 8412 /* For debugging */ 8413 rack->r_ctl.rc_rsm_at_retran = rsm; 8414 #endif 8415 /* 8416 * Get standard flags, and add SYN or FIN if requested by 'hidden' 8417 * state flags. 8418 */ 8419 if (tp->t_flags & TF_NEEDFIN) 8420 flags |= TH_FIN; 8421 if (tp->t_flags & TF_NEEDSYN) 8422 flags |= TH_SYN; 8423 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 8424 void *end_rsm; 8425 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 8426 if (end_rsm) 8427 kern_prefetch(end_rsm, &prefetch_rsm); 8428 prefetch_rsm = 1; 8429 } 8430 SOCKBUF_LOCK(sb); 8431 /* 8432 * If in persist timeout with window of 0, send 1 byte. Otherwise, 8433 * if window is small but nonzero and time TF_SENTFIN expired, we 8434 * will send what we can and go to transmit state. 8435 */ 8436 if (tp->t_flags & TF_FORCEDATA) { 8437 if (sendwin == 0) { 8438 /* 8439 * If we still have some data to send, then clear 8440 * the FIN bit. Usually this would happen below 8441 * when it realizes that we aren't sending all the 8442 * data. However, if we have exactly 1 byte of 8443 * unsent data, then it won't clear the FIN bit 8444 * below, and if we are in persist state, we wind up 8445 * sending the packet without recording that we sent 8446 * the FIN bit. 8447 * 8448 * We can't just blindly clear the FIN bit, because 8449 * if we don't have any more data to send then the 8450 * probe will be the FIN itself. 8451 */ 8452 if (sb_offset < sbused(sb)) 8453 flags &= ~TH_FIN; 8454 sendwin = 1; 8455 } else { 8456 if ((rack->rc_in_persist != 0) && 8457 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 8458 rack->r_ctl.rc_pace_min_segs))) 8459 rack_exit_persist(tp, rack); 8460 /* 8461 * If we are dropping persist mode then we need to 8462 * correct snd_nxt/snd_max and off. 8463 */ 8464 tp->snd_nxt = tp->snd_max; 8465 sb_offset = tp->snd_nxt - tp->snd_una; 8466 } 8467 } 8468 /* 8469 * If snd_nxt == snd_max and we have transmitted a FIN, the 8470 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 8471 * negative length. This can also occur when TCP opens up its 8472 * congestion window while receiving additional duplicate acks after 8473 * fast-retransmit because TCP will reset snd_nxt to snd_max after 8474 * the fast-retransmit. 8475 * 8476 * In the normal retransmit-FIN-only case, however, snd_nxt will be 8477 * set to snd_una, the sb_offset will be 0, and the length may wind 8478 * up 0. 8479 * 8480 * If sack_rxmit is true we are retransmitting from the scoreboard 8481 * in which case len is already set. 
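	 * Otherwise len is derived below from the socket buffer, the send
	 * window and, when we are in recovery, the PRR send count.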
8482 */ 8483 if (sack_rxmit == 0) { 8484 uint32_t avail; 8485 8486 avail = sbavail(sb); 8487 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 8488 sb_offset = tp->snd_nxt - tp->snd_una; 8489 else 8490 sb_offset = 0; 8491 if (IN_RECOVERY(tp->t_flags) == 0) { 8492 if (rack->r_ctl.rc_tlp_new_data) { 8493 /* TLP is forcing out new data */ 8494 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 8495 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 8496 } 8497 if (rack->r_ctl.rc_tlp_new_data > tp->snd_wnd) 8498 len = tp->snd_wnd; 8499 else 8500 len = rack->r_ctl.rc_tlp_new_data; 8501 rack->r_ctl.rc_tlp_new_data = 0; 8502 new_data_tlp = doing_tlp = 1; 8503 } else { 8504 if (sendwin > avail) { 8505 /* use the available */ 8506 if (avail > sb_offset) { 8507 len = (int32_t)(avail - sb_offset); 8508 } else { 8509 len = 0; 8510 } 8511 } else { 8512 if (sendwin > sb_offset) { 8513 len = (int32_t)(sendwin - sb_offset); 8514 } else { 8515 len = 0; 8516 } 8517 } 8518 } 8519 } else { 8520 uint32_t outstanding; 8521 8522 /* 8523 * We are inside of a SACK recovery episode and are 8524 * sending new data, having retransmitted all the 8525 * data possible so far in the scoreboard. 8526 */ 8527 outstanding = tp->snd_max - tp->snd_una; 8528 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 8529 if (tp->snd_wnd > outstanding) { 8530 len = tp->snd_wnd - outstanding; 8531 /* Check to see if we have the data */ 8532 if (((sb_offset + len) > avail) && 8533 (avail > sb_offset)) 8534 len = avail - sb_offset; 8535 else 8536 len = 0; 8537 } else 8538 len = 0; 8539 } else if (avail > sb_offset) 8540 len = avail - sb_offset; 8541 else 8542 len = 0; 8543 if (len > 0) { 8544 if (len > rack->r_ctl.rc_prr_sndcnt) 8545 len = rack->r_ctl.rc_prr_sndcnt; 8546 if (len > 0) { 8547 sub_from_prr = 1; 8548 counter_u64_add(rack_rtm_prr_newdata, 1); 8549 } 8550 } 8551 if (len > ctf_fixed_maxseg(tp)) { 8552 /* 8553 * We should never send more than a MSS when 8554 * retransmitting or sending new data in prr 8555 * mode unless the override flag is on. Most 8556 * likely the PRR algorithm is not going to 8557 * let us send a lot as well :-) 8558 */ 8559 if (rack->r_ctl.rc_prr_sendalot == 0) 8560 len = ctf_fixed_maxseg(tp); 8561 } else if (len < ctf_fixed_maxseg(tp)) { 8562 /* 8563 * Do we send any? The idea here is if the 8564 * send empty's the socket buffer we want to 8565 * do it. However if not then lets just wait 8566 * for our prr_sndcnt to get bigger. 8567 */ 8568 long leftinsb; 8569 8570 leftinsb = sbavail(sb) - sb_offset; 8571 if (leftinsb > len) { 8572 /* This send does not empty the sb */ 8573 len = 0; 8574 } 8575 } 8576 } 8577 } 8578 if (prefetch_so_done == 0) { 8579 kern_prefetch(so, &prefetch_so_done); 8580 prefetch_so_done = 1; 8581 } 8582 /* 8583 * Lop off SYN bit if it has already been sent. However, if this is 8584 * SYN-SENT state and if segment contains data and if we don't know 8585 * that foreign host supports TAO, suppress sending segment. 8586 */ 8587 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 8588 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 8589 if (tp->t_state != TCPS_SYN_RECEIVED) 8590 flags &= ~TH_SYN; 8591 /* 8592 * When sending additional segments following a TFO SYN|ACK, 8593 * do not include the SYN bit. 8594 */ 8595 if (IS_FASTOPEN(tp->t_flags) && 8596 (tp->t_state == TCPS_SYN_RECEIVED)) 8597 flags &= ~TH_SYN; 8598 sb_offset--, len++; 8599 } 8600 /* 8601 * Be careful not to send data and/or FIN on SYN segments. 
This 8602 * measure is needed to prevent interoperability problems with not 8603 * fully conformant TCP implementations. 8604 */ 8605 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 8606 len = 0; 8607 flags &= ~TH_FIN; 8608 } 8609 /* 8610 * On TFO sockets, ensure no data is sent in the following cases: 8611 * 8612 * - When retransmitting SYN|ACK on a passively-created socket 8613 * 8614 * - When retransmitting SYN on an actively created socket 8615 * 8616 * - When sending a zero-length cookie (cookie request) on an 8617 * actively created socket 8618 * 8619 * - When the socket is in the CLOSED state (RST is being sent) 8620 */ 8621 if (IS_FASTOPEN(tp->t_flags) && 8622 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 8623 ((tp->t_state == TCPS_SYN_SENT) && 8624 (tp->t_tfo_client_cookie_len == 0)) || 8625 (flags & TH_RST))) { 8626 sack_rxmit = 0; 8627 len = 0; 8628 } 8629 /* Without fast-open there should never be data sent on a SYN */ 8630 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) 8631 len = 0; 8632 orig_len = len; 8633 if (len <= 0) { 8634 /* 8635 * If FIN has been sent but not acked, but we haven't been 8636 * called to retransmit, len will be < 0. Otherwise, window 8637 * shrank after we sent into it. If window shrank to 0, 8638 * cancel pending retransmit, pull snd_nxt back to (closed) 8639 * window, and set the persist timer if it isn't already 8640 * going. If the window didn't close completely, just wait 8641 * for an ACK. 8642 * 8643 * We also do a general check here to ensure that we will 8644 * set the persist timer when we have data to send, but a 8645 * 0-byte window. This makes sure the persist timer is set 8646 * even if the packet hits one of the "goto send" lines 8647 * below. 8648 */ 8649 len = 0; 8650 if ((tp->snd_wnd == 0) && 8651 (TCPS_HAVEESTABLISHED(tp->t_state)) && 8652 (tp->snd_una == tp->snd_max) && 8653 (sb_offset < (int)sbavail(sb))) { 8654 tp->snd_nxt = tp->snd_una; 8655 rack_enter_persist(tp, rack, cts); 8656 } 8657 } else if ((rsm == NULL) && 8658 ((doing_tlp == 0) || (new_data_tlp == 1)) && 8659 (len < rack->r_ctl.rc_pace_max_segs)) { 8660 /* 8661 * We are not sending a full segment for 8662 * some reason. Should we not send anything (think 8663 * sws or persists)? 8664 */ 8665 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 8666 (TCPS_HAVEESTABLISHED(tp->t_state)) && 8667 (len < (int)(sbavail(sb) - sb_offset))) { 8668 /* 8669 * Here the rwnd is less than 8670 * the pacing size, this is not a retransmit, 8671 * we are established and 8672 * the send is not the last in the socket buffer 8673 * we send nothing, and may enter persists. 8674 */ 8675 len = 0; 8676 if (tp->snd_max == tp->snd_una) { 8677 /* 8678 * Nothing out we can 8679 * go into persists. 8680 */ 8681 rack_enter_persist(tp, rack, cts); 8682 tp->snd_nxt = tp->snd_una; 8683 } 8684 } else if ((tp->snd_cwnd >= max(rack->r_ctl.rc_pace_min_segs, (maxseg * 4))) && 8685 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * maxseg)) && 8686 (len < (int)(sbavail(sb) - sb_offset)) && 8687 (len < rack->r_ctl.rc_pace_min_segs)) { 8688 /* 8689 * Here we are not retransmitting, and 8690 * the cwnd is not so small that we could 8691 * not send at least a min size (rxt timer 8692 * not having gone off), We have 2 segments or 8693 * more already in flight, its not the tail end 8694 * of the socket buffer and the cwnd is blocking 8695 * us from sending out a minimum pacing segment size. 8696 * Lets not send anything. 
8697 */ 8698 len = 0; 8699 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 8700 min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 8701 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * maxseg)) && 8702 (len < (int)(sbavail(sb) - sb_offset)) && 8703 (TCPS_HAVEESTABLISHED(tp->t_state))) { 8704 /* 8705 * Here we have a send window but we have 8706 * filled it up and we can't send another pacing segment. 8707 * We also have in flight more than 2 segments 8708 * and we are not completing the sb i.e. we allow 8709 * the last bytes of the sb to go out even if 8710 * its not a full pacing segment. 8711 */ 8712 len = 0; 8713 } 8714 } 8715 /* len will be >= 0 after this point. */ 8716 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 8717 tcp_sndbuf_autoscale(tp, so, sendwin); 8718 /* 8719 * Decide if we can use TCP Segmentation Offloading (if supported by 8720 * hardware). 8721 * 8722 * TSO may only be used if we are in a pure bulk sending state. The 8723 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 8724 * options prevent using TSO. With TSO the TCP header is the same 8725 * (except for the sequence number) for all generated packets. This 8726 * makes it impossible to transmit any options which vary per 8727 * generated segment or packet. 8728 * 8729 * IPv4 handling has a clear separation of ip options and ip header 8730 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 8731 * the right thing below to provide length of just ip options and thus 8732 * checking for ipoptlen is enough to decide if ip options are present. 8733 */ 8734 8735 #ifdef INET6 8736 if (isipv6) 8737 ipoptlen = ip6_optlen(tp->t_inpcb); 8738 else 8739 #endif 8740 if (tp->t_inpcb->inp_options) 8741 ipoptlen = tp->t_inpcb->inp_options->m_len - 8742 offsetof(struct ipoption, ipopt_list); 8743 else 8744 ipoptlen = 0; 8745 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 8746 /* 8747 * Pre-calculate here as we save another lookup into the darknesses 8748 * of IPsec that way and can actually decide if TSO is ok. 8749 */ 8750 #ifdef INET6 8751 if (isipv6 && IPSEC_ENABLED(ipv6)) 8752 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 8753 #ifdef INET 8754 else 8755 #endif 8756 #endif /* INET6 */ 8757 #ifdef INET 8758 if (IPSEC_ENABLED(ipv4)) 8759 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 8760 #endif /* INET */ 8761 #endif 8762 8763 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 8764 ipoptlen += ipsec_optlen; 8765 #endif 8766 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > ctf_fixed_maxseg(tp) && 8767 (tp->t_port == 0) && 8768 ((tp->t_flags & TF_SIGNATURE) == 0) && 8769 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 8770 ipoptlen == 0) 8771 tso = 1; 8772 { 8773 uint32_t outstanding; 8774 8775 outstanding = tp->snd_max - tp->snd_una; 8776 if (tp->t_flags & TF_SENTFIN) { 8777 /* 8778 * If we sent a fin, snd_max is 1 higher than 8779 * snd_una 8780 */ 8781 outstanding--; 8782 } 8783 if (sack_rxmit) { 8784 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 8785 flags &= ~TH_FIN; 8786 } else { 8787 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 8788 sbused(sb))) 8789 flags &= ~TH_FIN; 8790 } 8791 } 8792 recwin = sbspace(&so->so_rcv); 8793 8794 /* 8795 * Sender silly window avoidance. We transmit under the following 8796 * conditions when len is non-zero: 8797 * 8798 * - We have a full segment (or more with TSO) - This is the last 8799 * buffer in a write()/send() and we are either idle or running 8800 * NODELAY - we've timed out (e.g. 
persist timer) - we have more 8801 * then 1/2 the maximum send window's worth of data (receiver may be 8802 * limited the window size) - we need to retransmit 8803 */ 8804 if (len) { 8805 if (len >= ctf_fixed_maxseg(tp)) { 8806 pass = 1; 8807 goto send; 8808 } 8809 /* 8810 * NOTE! on localhost connections an 'ack' from the remote 8811 * end may occur synchronously with the output and cause us 8812 * to flush a buffer queued with moretocome. XXX 8813 * 8814 */ 8815 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 8816 (idle || (tp->t_flags & TF_NODELAY)) && 8817 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) && 8818 (tp->t_flags & TF_NOPUSH) == 0) { 8819 pass = 2; 8820 goto send; 8821 } 8822 if (tp->t_flags & TF_FORCEDATA) { /* typ. timeout case */ 8823 pass = 3; 8824 goto send; 8825 } 8826 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 8827 goto send; 8828 } 8829 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 8830 pass = 4; 8831 goto send; 8832 } 8833 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 8834 pass = 5; 8835 goto send; 8836 } 8837 if (sack_rxmit) { 8838 pass = 6; 8839 goto send; 8840 } 8841 } 8842 /* 8843 * Sending of standalone window updates. 8844 * 8845 * Window updates are important when we close our window due to a 8846 * full socket buffer and are opening it again after the application 8847 * reads data from it. Once the window has opened again and the 8848 * remote end starts to send again the ACK clock takes over and 8849 * provides the most current window information. 8850 * 8851 * We must avoid the silly window syndrome whereas every read from 8852 * the receive buffer, no matter how small, causes a window update 8853 * to be sent. We also should avoid sending a flurry of window 8854 * updates when the socket buffer had queued a lot of data and the 8855 * application is doing small reads. 8856 * 8857 * Prevent a flurry of pointless window updates by only sending an 8858 * update when we can increase the advertized window by more than 8859 * 1/4th of the socket buffer capacity. When the buffer is getting 8860 * full or is very small be more aggressive and send an update 8861 * whenever we can increase by two mss sized segments. In all other 8862 * situations the ACK's to new incoming data will carry further 8863 * window increases. 8864 * 8865 * Don't send an independent window update if a delayed ACK is 8866 * pending (it will get piggy-backed on it) or the remote side 8867 * already has done a half-close and won't send more data. Skip 8868 * this if the connection is in T/TCP half-open state. 8869 */ 8870 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 8871 !(tp->t_flags & TF_DELACK) && 8872 !TCPS_HAVERCVDFIN(tp->t_state)) { 8873 /* 8874 * "adv" is the amount we could increase the window, taking 8875 * into account that we are limited by TCP_MAXWIN << 8876 * tp->rcv_scale. 8877 */ 8878 int32_t adv; 8879 int oldwin; 8880 8881 adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale); 8882 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 8883 oldwin = (tp->rcv_adv - tp->rcv_nxt); 8884 adv -= oldwin; 8885 } else 8886 oldwin = 0; 8887 8888 /* 8889 * If the new window size ends up being the same as the old 8890 * size when it is scaled, then don't force a window update. 
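 *
 * Worked example (scale value assumed): with rcv_scale == 7 the
 * advertised window field moves in units of 128 bytes, so an
 * increase of well under 128 bytes often rounds away entirely,
 * leaving oldwin >> 7 equal to (adv + oldwin) >> 7; in that case we
 * skip the update, since anything below one unit of the scaled
 * window is invisible to the peer anyway.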
8891 */ 8892 if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale) 8893 goto dontupdate; 8894 8895 if (adv >= (int32_t)(2 * ctf_fixed_maxseg(tp)) && 8896 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 8897 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 8898 so->so_rcv.sb_hiwat <= 8 * ctf_fixed_maxseg(tp))) { 8899 pass = 7; 8900 goto send; 8901 } 8902 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) 8903 goto send; 8904 } 8905 dontupdate: 8906 8907 /* 8908 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 8909 * is also a catch-all for the retransmit timer timeout case. 8910 */ 8911 if (tp->t_flags & TF_ACKNOW) { 8912 pass = 8; 8913 goto send; 8914 } 8915 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 8916 pass = 9; 8917 goto send; 8918 } 8919 if (SEQ_GT(tp->snd_up, tp->snd_una)) { 8920 pass = 10; 8921 goto send; 8922 } 8923 /* 8924 * If our state indicates that FIN should be sent and we have not 8925 * yet done so, then we need to send. 8926 */ 8927 if ((flags & TH_FIN) && 8928 (tp->snd_nxt == tp->snd_una)) { 8929 pass = 11; 8930 goto send; 8931 } 8932 /* 8933 * No reason to send a segment, just return. 8934 */ 8935 just_return: 8936 SOCKBUF_UNLOCK(sb); 8937 just_return_nolock: 8938 if (tot_len_this_send == 0) 8939 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 8940 if (slot) { 8941 /* set the rack tcb into the slot N */ 8942 counter_u64_add(rack_paced_segments, 1); 8943 } else if (tot_len_this_send) { 8944 counter_u64_add(rack_unpaced_segments, 1); 8945 } 8946 /* Check if we need to go into persists or not */ 8947 if ((rack->rc_in_persist == 0) && 8948 (tp->snd_max == tp->snd_una) && 8949 TCPS_HAVEESTABLISHED(tp->t_state) && 8950 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 8951 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd) && 8952 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs))) { 8953 /* Yes lets make sure to move to persist before timer-start */ 8954 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 8955 } 8956 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 8957 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling); 8958 tp->t_flags &= ~TF_FORCEDATA; 8959 return (0); 8960 8961 send: 8962 if ((flags & TH_FIN) && 8963 sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8964 /* 8965 * We do not transmit a FIN 8966 * with data outstanding. We 8967 * need to make it so all data 8968 * is acked first. 8969 */ 8970 flags &= ~TH_FIN; 8971 } 8972 if (doing_tlp == 0) { 8973 /* 8974 * Data not a TLP, and its not the rxt firing. If it is the 8975 * rxt firing, we want to leave the tlp_in_progress flag on 8976 * so we don't send another TLP. It has to be a rack timer 8977 * or normal send (response to acked data) to clear the tlp 8978 * in progress flag. 8979 */ 8980 rack->rc_tlp_in_progress = 0; 8981 } 8982 SOCKBUF_LOCK_ASSERT(sb); 8983 if (len > 0) { 8984 if (len >= ctf_fixed_maxseg(tp)) 8985 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 8986 else 8987 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 8988 } 8989 /* 8990 * Before ESTABLISHED, force sending of initial options unless TCP 8991 * set not to do any options. NOTE: we assume that the IP/TCP header 8992 * plus TCP options always fit in a single mbuf, leaving room for a 8993 * maximum link header, i.e. 
max_linkhdr + sizeof (struct tcpiphdr) 8994 * + optlen <= MCLBYTES 8995 */ 8996 optlen = 0; 8997 #ifdef INET6 8998 if (isipv6) 8999 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 9000 else 9001 #endif 9002 hdrlen = sizeof(struct tcpiphdr); 9003 9004 /* 9005 * Compute options for segment. We only have to care about SYN and 9006 * established connection segments. Options for SYN-ACK segments 9007 * are handled in TCP syncache. 9008 */ 9009 to.to_flags = 0; 9010 if ((tp->t_flags & TF_NOOPT) == 0) { 9011 /* Maximum segment size. */ 9012 if (flags & TH_SYN) { 9013 tp->snd_nxt = tp->iss; 9014 to.to_mss = tcp_mssopt(&inp->inp_inc); 9015 #ifdef NETFLIX_TCPOUDP 9016 if (tp->t_port) 9017 to.to_mss -= V_tcp_udp_tunneling_overhead; 9018 #endif 9019 to.to_flags |= TOF_MSS; 9020 9021 /* 9022 * On SYN or SYN|ACK transmits on TFO connections, 9023 * only include the TFO option if it is not a 9024 * retransmit, as the presence of the TFO option may 9025 * have caused the original SYN or SYN|ACK to have 9026 * been dropped by a middlebox. 9027 */ 9028 if (IS_FASTOPEN(tp->t_flags) && 9029 (tp->t_rxtshift == 0)) { 9030 if (tp->t_state == TCPS_SYN_RECEIVED) { 9031 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 9032 to.to_tfo_cookie = 9033 (u_int8_t *)&tp->t_tfo_cookie.server; 9034 to.to_flags |= TOF_FASTOPEN; 9035 wanted_cookie = 1; 9036 } else if (tp->t_state == TCPS_SYN_SENT) { 9037 to.to_tfo_len = 9038 tp->t_tfo_client_cookie_len; 9039 to.to_tfo_cookie = 9040 tp->t_tfo_cookie.client; 9041 to.to_flags |= TOF_FASTOPEN; 9042 wanted_cookie = 1; 9043 /* 9044 * If we wind up having more data to 9045 * send with the SYN than can fit in 9046 * one segment, don't send any more 9047 * until the SYN|ACK comes back from 9048 * the other end. 9049 */ 9050 sendalot = 0; 9051 } 9052 } 9053 } 9054 /* Window scaling. */ 9055 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 9056 to.to_wscale = tp->request_r_scale; 9057 to.to_flags |= TOF_SCALE; 9058 } 9059 /* Timestamps. */ 9060 if ((tp->t_flags & TF_RCVD_TSTMP) || 9061 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 9062 to.to_tsval = cts + tp->ts_offset; 9063 to.to_tsecr = tp->ts_recent; 9064 to.to_flags |= TOF_TS; 9065 } 9066 /* Set receive buffer autosizing timestamp. */ 9067 if (tp->rfbuf_ts == 0 && 9068 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 9069 tp->rfbuf_ts = tcp_ts_getticks(); 9070 /* Selective ACK's. */ 9071 if (flags & TH_SYN) 9072 to.to_flags |= TOF_SACKPERM; 9073 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 9074 tp->rcv_numsacks > 0) { 9075 to.to_flags |= TOF_SACK; 9076 to.to_nsacks = tp->rcv_numsacks; 9077 to.to_sacks = (u_char *)tp->sackblks; 9078 } 9079 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 9080 /* TCP-MD5 (RFC2385). */ 9081 if (tp->t_flags & TF_SIGNATURE) 9082 to.to_flags |= TOF_SIGNATURE; 9083 #endif /* TCP_SIGNATURE */ 9084 9085 /* Processing the options. */ 9086 hdrlen += optlen = tcp_addoptions(&to, opt); 9087 /* 9088 * If we wanted a TFO option to be added, but it was unable 9089 * to fit, ensure no data is sent. 9090 */ 9091 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 9092 !(to.to_flags & TOF_FASTOPEN)) 9093 len = 0; 9094 } 9095 #ifdef NETFLIX_TCPOUDP 9096 if (tp->t_port) { 9097 if (V_tcp_udp_tunneling_port == 0) { 9098 /* The port was removed?? 
*/ 9099 SOCKBUF_UNLOCK(&so->so_snd); 9100 return (EHOSTUNREACH); 9101 } 9102 hdrlen += sizeof(struct udphdr); 9103 } 9104 #endif 9105 #ifdef INET6 9106 if (isipv6) 9107 ipoptlen = ip6_optlen(tp->t_inpcb); 9108 else 9109 #endif 9110 if (tp->t_inpcb->inp_options) 9111 ipoptlen = tp->t_inpcb->inp_options->m_len - 9112 offsetof(struct ipoption, ipopt_list); 9113 else 9114 ipoptlen = 0; 9115 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 9116 ipoptlen += ipsec_optlen; 9117 #endif 9118 9119 #ifdef KERN_TLS 9120 /* force TSO for so TLS offload can get mss */ 9121 if (sb->sb_flags & SB_TLS_IFNET) { 9122 force_tso = 1; 9123 } 9124 #endif 9125 /* 9126 * Adjust data length if insertion of options will bump the packet 9127 * length beyond the t_maxseg length. Clear the FIN bit because we 9128 * cut off the tail of the segment. 9129 */ 9130 if (len + optlen + ipoptlen > tp->t_maxseg) { 9131 if (tso) { 9132 uint32_t if_hw_tsomax; 9133 uint32_t moff; 9134 int32_t max_len; 9135 9136 /* extract TSO information */ 9137 if_hw_tsomax = tp->t_tsomax; 9138 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 9139 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 9140 KASSERT(ipoptlen == 0, 9141 ("%s: TSO can't do IP options", __func__)); 9142 9143 /* 9144 * Check if we should limit by maximum payload 9145 * length: 9146 */ 9147 if (if_hw_tsomax != 0) { 9148 /* compute maximum TSO length */ 9149 max_len = (if_hw_tsomax - hdrlen - 9150 max_linkhdr); 9151 if (max_len <= 0) { 9152 len = 0; 9153 } else if (len > max_len) { 9154 sendalot = 1; 9155 len = max_len; 9156 } 9157 } 9158 /* 9159 * Prevent the last segment from being fractional 9160 * unless the send sockbuf can be emptied: 9161 */ 9162 max_len = (tp->t_maxseg - optlen); 9163 if (((sb_offset + len) < sbavail(sb)) && 9164 (hw_tls == 0)) { 9165 moff = len % (u_int)max_len; 9166 if (moff != 0) { 9167 len -= moff; 9168 sendalot = 1; 9169 } 9170 } 9171 /* 9172 * In case there are too many small fragments don't 9173 * use TSO: 9174 */ 9175 if (len <= maxseg) { 9176 len = max_len; 9177 sendalot = 1; 9178 tso = 0; 9179 } 9180 /* 9181 * Send the FIN in a separate segment after the bulk 9182 * sending is done. We don't trust the TSO 9183 * implementations to clear the FIN flag on all but 9184 * the last segment. 9185 */ 9186 if (tp->t_flags & TF_NEEDFIN) 9187 sendalot = 1; 9188 9189 } else { 9190 if (optlen + ipoptlen >= tp->t_maxseg) { 9191 /* 9192 * Since we don't have enough space to put 9193 * the IP header chain and the TCP header in 9194 * one packet as required by RFC 7112, don't 9195 * send it. Also ensure that at least one 9196 * byte of the payload can be put into the 9197 * TCP segment. 9198 */ 9199 SOCKBUF_UNLOCK(&so->so_snd); 9200 error = EMSGSIZE; 9201 sack_rxmit = 0; 9202 goto out; 9203 } 9204 len = tp->t_maxseg - optlen - ipoptlen; 9205 sendalot = 1; 9206 } 9207 } else 9208 tso = 0; 9209 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 9210 ("%s: len > IP_MAXPACKET", __func__)); 9211 #ifdef DIAGNOSTIC 9212 #ifdef INET6 9213 if (max_linkhdr + hdrlen > MCLBYTES) 9214 #else 9215 if (max_linkhdr + hdrlen > MHLEN) 9216 #endif 9217 panic("tcphdr too big"); 9218 #endif 9219 9220 /* 9221 * This KASSERT is here to catch edge cases at a well defined place. 9222 * Before, those had triggered (random) panic conditions further 9223 * down. 9224 */ 9225 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 9226 if ((len == 0) && 9227 (flags & TH_FIN) && 9228 (sbused(sb))) { 9229 /* 9230 * We have outstanding data, don't send a fin by itself!. 
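 *
 * For example (size assumed): if the application has already done a
 * shutdown() but a few kilobytes are still sitting in so_snd, we
 * take the just_return path here rather than build a FIN-only
 * segment; the FIN goes out on a later pass once the buffer has
 * drained.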
9231 */ 9232 goto just_return; 9233 } 9234 /* 9235 * Grab a header mbuf, attaching a copy of data to be transmitted, 9236 * and initialize the header from the template for sends on this 9237 * connection. 9238 */ 9239 if (len) { 9240 uint32_t max_val; 9241 uint32_t moff; 9242 9243 if (rack->rc_pace_max_segs) 9244 max_val = rack->rc_pace_max_segs * ctf_fixed_maxseg(tp); 9245 else 9246 max_val = len; 9247 if (rack->r_ctl.rc_pace_max_segs < max_val) 9248 max_val = rack->r_ctl.rc_pace_max_segs; 9249 /* 9250 * We allow a limit on sending with hptsi. 9251 */ 9252 if (len > max_val) { 9253 len = max_val; 9254 } 9255 #ifdef INET6 9256 if (MHLEN < hdrlen + max_linkhdr) 9257 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 9258 else 9259 #endif 9260 m = m_gethdr(M_NOWAIT, MT_DATA); 9261 9262 if (m == NULL) { 9263 SOCKBUF_UNLOCK(sb); 9264 error = ENOBUFS; 9265 sack_rxmit = 0; 9266 goto out; 9267 } 9268 m->m_data += max_linkhdr; 9269 m->m_len = hdrlen; 9270 9271 /* 9272 * Start the m_copy functions from the closest mbuf to the 9273 * sb_offset in the socket buffer chain. 9274 */ 9275 mb = sbsndptr_noadv(sb, sb_offset, &moff); 9276 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 9277 m_copydata(mb, moff, (int)len, 9278 mtod(m, caddr_t)+hdrlen); 9279 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 9280 sbsndptr_adv(sb, mb, len); 9281 m->m_len += len; 9282 } else { 9283 struct sockbuf *msb; 9284 9285 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 9286 msb = NULL; 9287 else 9288 msb = sb; 9289 m->m_next = tcp_m_copym( 9290 #ifdef NETFLIX_COPY_ARGS 9291 tp, 9292 #endif 9293 mb, moff, &len, 9294 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 9295 ((rsm == NULL) ? hw_tls : 0) 9296 #ifdef NETFLIX_COPY_ARGS 9297 , &filled_all 9298 #endif 9299 ); 9300 if (len <= (tp->t_maxseg - optlen)) { 9301 /* 9302 * Must have ran out of mbufs for the copy 9303 * shorten it to no longer need tso. Lets 9304 * not put on sendalot since we are low on 9305 * mbufs. 9306 */ 9307 tso = 0; 9308 } 9309 if (m->m_next == NULL) { 9310 SOCKBUF_UNLOCK(sb); 9311 (void)m_free(m); 9312 error = ENOBUFS; 9313 sack_rxmit = 0; 9314 goto out; 9315 } 9316 } 9317 if ((tp->t_flags & TF_FORCEDATA) && len == 1) { 9318 TCPSTAT_INC(tcps_sndprobe); 9319 #ifdef NETFLIX_STATS 9320 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 9321 stats_voi_update_abs_u32(tp->t_stats, 9322 VOI_TCP_RETXPB, len); 9323 else 9324 stats_voi_update_abs_u64(tp->t_stats, 9325 VOI_TCP_TXPB, len); 9326 #endif 9327 } else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 9328 if (rsm && (rsm->r_flags & RACK_TLP)) { 9329 /* 9330 * TLP should not count in retran count, but 9331 * in its own bin 9332 */ 9333 counter_u64_add(rack_tlp_retran, 1); 9334 counter_u64_add(rack_tlp_retran_bytes, len); 9335 } else { 9336 tp->t_sndrexmitpack++; 9337 TCPSTAT_INC(tcps_sndrexmitpack); 9338 TCPSTAT_ADD(tcps_sndrexmitbyte, len); 9339 } 9340 #ifdef NETFLIX_STATS 9341 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 9342 len); 9343 #endif 9344 } else { 9345 TCPSTAT_INC(tcps_sndpack); 9346 TCPSTAT_ADD(tcps_sndbyte, len); 9347 #ifdef NETFLIX_STATS 9348 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 9349 len); 9350 #endif 9351 } 9352 /* 9353 * If we're sending everything we've got, set PUSH. (This 9354 * will keep happy those implementations which only give 9355 * data to the user when a buffer fills or a PUSH comes in.) 9356 */ 9357 if (sb_offset + len == sbused(sb) && 9358 sbused(sb) && 9359 !(flags & TH_SYN)) 9360 flags |= TH_PUSH; 9361 9362 /* 9363 * Are we doing pacing, if so we must calculate the slot. 
We 9364 * only do hptsi in ESTABLISHED and with no RESET being 9365 * sent where we have data to send. 9366 */ 9367 if (((tp->t_state == TCPS_ESTABLISHED) || 9368 (tp->t_state == TCPS_CLOSE_WAIT) || 9369 ((tp->t_state == TCPS_FIN_WAIT_1) && 9370 ((tp->t_flags & TF_SENTFIN) == 0) && 9371 ((flags & TH_FIN) == 0))) && 9372 ((flags & TH_RST) == 0)) { 9373 /* Get our pacing rate */ 9374 tot_len_this_send += len; 9375 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send); 9376 } 9377 SOCKBUF_UNLOCK(sb); 9378 } else { 9379 SOCKBUF_UNLOCK(sb); 9380 if (tp->t_flags & TF_ACKNOW) 9381 TCPSTAT_INC(tcps_sndacks); 9382 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 9383 TCPSTAT_INC(tcps_sndctrl); 9384 else if (SEQ_GT(tp->snd_up, tp->snd_una)) 9385 TCPSTAT_INC(tcps_sndurg); 9386 else 9387 TCPSTAT_INC(tcps_sndwinup); 9388 9389 m = m_gethdr(M_NOWAIT, MT_DATA); 9390 if (m == NULL) { 9391 error = ENOBUFS; 9392 sack_rxmit = 0; 9393 goto out; 9394 } 9395 #ifdef INET6 9396 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 9397 MHLEN >= hdrlen) { 9398 M_ALIGN(m, hdrlen); 9399 } else 9400 #endif 9401 m->m_data += max_linkhdr; 9402 m->m_len = hdrlen; 9403 } 9404 SOCKBUF_UNLOCK_ASSERT(sb); 9405 m->m_pkthdr.rcvif = (struct ifnet *)0; 9406 #ifdef MAC 9407 mac_inpcb_create_mbuf(inp, m); 9408 #endif 9409 #ifdef INET6 9410 if (isipv6) { 9411 ip6 = mtod(m, struct ip6_hdr *); 9412 #ifdef NETFLIX_TCPOUDP 9413 if (tp->t_port) { 9414 udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr)); 9415 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 9416 udp->uh_dport = tp->t_port; 9417 ulen = hdrlen + len - sizeof(struct ip6_hdr); 9418 udp->uh_ulen = htons(ulen); 9419 th = (struct tcphdr *)(udp + 1); 9420 } else 9421 #endif 9422 th = (struct tcphdr *)(ip6 + 1); 9423 tcpip_fillheaders(inp, 9424 #ifdef NETFLIX_TCPOUDP 9425 tp->t_port, 9426 #endif 9427 ip6, th); 9428 } else 9429 #endif /* INET6 */ 9430 { 9431 ip = mtod(m, struct ip *); 9432 #ifdef TCPDEBUG 9433 ipov = (struct ipovly *)ip; 9434 #endif 9435 #ifdef NETFLIX_TCPOUDP 9436 if (tp->t_port) { 9437 udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip)); 9438 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 9439 udp->uh_dport = tp->t_port; 9440 ulen = hdrlen + len - sizeof(struct ip); 9441 udp->uh_ulen = htons(ulen); 9442 th = (struct tcphdr *)(udp + 1); 9443 } else 9444 #endif 9445 th = (struct tcphdr *)(ip + 1); 9446 tcpip_fillheaders(inp, 9447 #ifdef NETFLIX_TCPOUDP 9448 tp->t_port, 9449 #endif 9450 ip, th); 9451 } 9452 /* 9453 * Fill in fields, remembering maximum advertised window for use in 9454 * delaying messages about window sizes. If resending a FIN, be sure 9455 * not to use a new sequence number. 9456 */ 9457 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 9458 tp->snd_nxt == tp->snd_max) 9459 tp->snd_nxt--; 9460 /* 9461 * If we are starting a connection, send ECN setup SYN packet. If we 9462 * are on a retransmit, we may resend those bits a number of times 9463 * as per RFC 3168. 9464 */ 9465 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 9466 if (tp->t_rxtshift >= 1) { 9467 if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 9468 flags |= TH_ECE | TH_CWR; 9469 } else 9470 flags |= TH_ECE | TH_CWR; 9471 } 9472 if (tp->t_state == TCPS_ESTABLISHED && 9473 (tp->t_flags & TF_ECN_PERMIT)) { 9474 /* 9475 * If the peer has ECN, mark data packets with ECN capable 9476 * transmission (ECT). Ignore pure ack packets, 9477 * retransmissions and window probes. 
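 *
 * Concretely (segment size assumed): a 1448 byte segment of new data
 * gets ECT(0) set below, in the IPv4 TOS byte or in the traffic
 * class bits of the IPv6 flow label, while a pure ack (len == 0), a
 * retransmission (snd_nxt < snd_max) or a one byte forced window
 * probe is left Not-ECT, which is what the length, sequence and
 * TF_FORCEDATA checks below test for.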
9478 */ 9479 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 9480 !((tp->t_flags & TF_FORCEDATA) && len == 1)) { 9481 #ifdef INET6 9482 if (isipv6) 9483 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 9484 else 9485 #endif 9486 ip->ip_tos |= IPTOS_ECN_ECT0; 9487 TCPSTAT_INC(tcps_ecn_ect0); 9488 } 9489 /* 9490 * Reply with proper ECN notifications. 9491 */ 9492 if (tp->t_flags & TF_ECN_SND_CWR) { 9493 flags |= TH_CWR; 9494 tp->t_flags &= ~TF_ECN_SND_CWR; 9495 } 9496 if (tp->t_flags & TF_ECN_SND_ECE) 9497 flags |= TH_ECE; 9498 } 9499 /* 9500 * If we are doing retransmissions, then snd_nxt will not reflect 9501 * the first unsent octet. For ACK only packets, we do not want the 9502 * sequence number of the retransmitted packet, we want the sequence 9503 * number of the next unsent octet. So, if there is no data (and no 9504 * SYN or FIN), use snd_max instead of snd_nxt when filling in 9505 * ti_seq. But if we are in persist state, snd_max might reflect 9506 * one byte beyond the right edge of the window, so use snd_nxt in 9507 * that case, since we know we aren't doing a retransmission. 9508 * (retransmit and persist are mutually exclusive...) 9509 */ 9510 if (sack_rxmit == 0) { 9511 if (len || (flags & (TH_SYN | TH_FIN)) || 9512 rack->rc_in_persist) { 9513 th->th_seq = htonl(tp->snd_nxt); 9514 rack_seq = tp->snd_nxt; 9515 } else if (flags & TH_RST) { 9516 /* 9517 * For a Reset send the last cum ack in sequence 9518 * (this like any other choice may still generate a 9519 * challenge ack, if a ack-update packet is in 9520 * flight). 9521 */ 9522 th->th_seq = htonl(tp->snd_una); 9523 rack_seq = tp->snd_una; 9524 } else { 9525 th->th_seq = htonl(tp->snd_max); 9526 rack_seq = tp->snd_max; 9527 } 9528 } else { 9529 th->th_seq = htonl(rsm->r_start); 9530 rack_seq = rsm->r_start; 9531 } 9532 th->th_ack = htonl(tp->rcv_nxt); 9533 if (optlen) { 9534 bcopy(opt, th + 1, optlen); 9535 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 9536 } 9537 th->th_flags = flags; 9538 /* 9539 * Calculate receive window. Don't shrink window, but avoid silly 9540 * window syndrome. 9541 * If a RST segment is sent, advertise a window of zero. 9542 */ 9543 if (flags & TH_RST) { 9544 recwin = 0; 9545 } else { 9546 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 9547 recwin < (long)ctf_fixed_maxseg(tp)) 9548 recwin = 0; 9549 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 9550 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 9551 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 9552 if (recwin > (long)TCP_MAXWIN << tp->rcv_scale) 9553 recwin = (long)TCP_MAXWIN << tp->rcv_scale; 9554 } 9555 9556 /* 9557 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 9558 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 9559 * handled in syncache. 9560 */ 9561 if (flags & TH_SYN) 9562 th->th_win = htons((u_short) 9563 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 9564 else 9565 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 9566 /* 9567 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 9568 * window. This may cause the remote transmitter to stall. This 9569 * flag tells soreceive() to disable delayed acknowledgements when 9570 * draining the buffer. This can occur if the receiver is 9571 * attempting to read more data than can be buffered prior to 9572 * transmitting on the connection. 
9573 */ 9574 if (th->th_win == 0) { 9575 tp->t_sndzerowin++; 9576 tp->t_flags |= TF_RXWIN0SENT; 9577 } else 9578 tp->t_flags &= ~TF_RXWIN0SENT; 9579 if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { 9580 th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); 9581 th->th_flags |= TH_URG; 9582 } else 9583 /* 9584 * If no urgent pointer to send, then we pull the urgent 9585 * pointer to the left edge of the send window so that it 9586 * doesn't drift into the send window on sequence number 9587 * wraparound. 9588 */ 9589 tp->snd_up = tp->snd_una; /* drag it along */ 9590 9591 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 9592 if (to.to_flags & TOF_SIGNATURE) { 9593 /* 9594 * Calculate MD5 signature and put it into the place 9595 * determined before. 9596 * NOTE: since TCP options buffer doesn't point into 9597 * mbuf's data, calculate offset and use it. 9598 */ 9599 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 9600 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 9601 /* 9602 * Do not send segment if the calculation of MD5 9603 * digest has failed. 9604 */ 9605 goto out; 9606 } 9607 } 9608 #endif 9609 9610 /* 9611 * Put TCP length in extended header, and then checksum extended 9612 * header and data. 9613 */ 9614 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 9615 #ifdef INET6 9616 if (isipv6) { 9617 /* 9618 * ip6_plen is not need to be filled now, and will be filled 9619 * in ip6_output. 9620 */ 9621 if (tp->t_port) { 9622 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 9623 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 9624 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 9625 th->th_sum = htons(0); 9626 UDPSTAT_INC(udps_opackets); 9627 } else { 9628 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 9629 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 9630 th->th_sum = in6_cksum_pseudo(ip6, 9631 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 9632 0); 9633 } 9634 } 9635 #endif 9636 #if defined(INET6) && defined(INET) 9637 else 9638 #endif 9639 #ifdef INET 9640 { 9641 if (tp->t_port) { 9642 m->m_pkthdr.csum_flags = CSUM_UDP; 9643 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 9644 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 9645 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 9646 th->th_sum = htons(0); 9647 UDPSTAT_INC(udps_opackets); 9648 } else { 9649 m->m_pkthdr.csum_flags = CSUM_TCP; 9650 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 9651 th->th_sum = in_pseudo(ip->ip_src.s_addr, 9652 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 9653 IPPROTO_TCP + len + optlen)); 9654 } 9655 /* IP version must be set here for ipv4/ipv6 checking later */ 9656 KASSERT(ip->ip_v == IPVERSION, 9657 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 9658 } 9659 #endif 9660 /* 9661 * Enable TSO and specify the size of the segments. The TCP pseudo 9662 * header checksum is always provided. XXX: Fixme: This is currently 9663 * not the case for IPv6. 9664 */ 9665 if (tso || force_tso) { 9666 KASSERT(force_tso || len > tp->t_maxseg - optlen, 9667 ("%s: len <= tso_segsz", __func__)); 9668 m->m_pkthdr.csum_flags |= CSUM_TSO; 9669 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 9670 } 9671 KASSERT(len + hdrlen == m_length(m, NULL), 9672 ("%s: mbuf chain different than expected: %d + %u != %u", 9673 __func__, len, hdrlen, m_length(m, NULL))); 9674 9675 #ifdef TCP_HHOOK 9676 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 9677 hhook_run_tcp_est_out(tp, th, &to, len, tso); 9678 #endif 9679 #ifdef TCPDEBUG 9680 /* 9681 * Trace. 
9682 */ 9683 if (so->so_options & SO_DEBUG) { 9684 u_short save = 0; 9685 9686 #ifdef INET6 9687 if (!isipv6) 9688 #endif 9689 { 9690 save = ipov->ih_len; 9691 ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + 9692 * (th->th_off << 2) */ ); 9693 } 9694 tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); 9695 #ifdef INET6 9696 if (!isipv6) 9697 #endif 9698 ipov->ih_len = save; 9699 } 9700 #endif /* TCPDEBUG */ 9701 9702 /* We're getting ready to send; log now. */ 9703 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 9704 union tcp_log_stackspecific log; 9705 struct timeval tv; 9706 9707 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 9708 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 9709 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 9710 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 9711 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 9712 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 9713 log.u_bbr.flex4 = orig_len; 9714 if (filled_all) 9715 log.u_bbr.flex5 = 0x80000000; 9716 else 9717 log.u_bbr.flex5 = 0; 9718 if (rsm || sack_rxmit) { 9719 log.u_bbr.flex8 = 1; 9720 } else { 9721 log.u_bbr.flex8 = 0; 9722 } 9723 log.u_bbr.pkts_out = tp->t_maxseg; 9724 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 9725 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9726 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 9727 len, &log, false, NULL, NULL, 0, &tv); 9728 } else 9729 lgb = NULL; 9730 9731 /* 9732 * Fill in IP length and desired time to live and send to IP level. 9733 * There should be a better way to handle ttl and tos; we could keep 9734 * them in the template, but need a way to checksum without them. 9735 */ 9736 /* 9737 * m->m_pkthdr.len should have been set before cksum calcuration, 9738 * because in6_cksum() need it. 9739 */ 9740 #ifdef INET6 9741 if (isipv6) { 9742 /* 9743 * we separately set hoplimit for every segment, since the 9744 * user might want to change the value via setsockopt. Also, 9745 * desired default hop limit might be changed via Neighbor 9746 * Discovery. 9747 */ 9748 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 9749 9750 /* 9751 * Set the packet size here for the benefit of DTrace 9752 * probes. ip6_output() will set it properly; it's supposed 9753 * to include the option header lengths as well. 9754 */ 9755 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 9756 9757 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 9758 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 9759 else 9760 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 9761 9762 if (tp->t_state == TCPS_SYN_SENT) 9763 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 9764 9765 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 9766 /* TODO: IPv6 IP6TOS_ECT bit on */ 9767 error = ip6_output(m, tp->t_inpcb->in6p_outputopts, 9768 &inp->inp_route6, 9769 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 9770 NULL, NULL, inp); 9771 9772 if (error == EMSGSIZE && inp->inp_route6.ro_rt != NULL) 9773 mtu = inp->inp_route6.ro_rt->rt_mtu; 9774 } 9775 #endif /* INET6 */ 9776 #if defined(INET) && defined(INET6) 9777 else 9778 #endif 9779 #ifdef INET 9780 { 9781 ip->ip_len = htons(m->m_pkthdr.len); 9782 #ifdef INET6 9783 if (inp->inp_vflag & INP_IPV6PROTO) 9784 ip->ip_ttl = in6_selecthlim(inp, NULL); 9785 #endif /* INET6 */ 9786 /* 9787 * If we do path MTU discovery, then we set DF on every 9788 * packet. This might not be the best thing to do according 9789 * to RFC3390 Section 2. 
However the tcp hostcache migitates 9790 * the problem so it affects only the first tcp connection 9791 * with a host. 9792 * 9793 * NB: Don't set DF on small MTU/MSS to have a safe 9794 * fallback. 9795 */ 9796 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 9797 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 9798 if (tp->t_port == 0 || len < V_tcp_minmss) { 9799 ip->ip_off |= htons(IP_DF); 9800 } 9801 } else { 9802 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 9803 } 9804 9805 if (tp->t_state == TCPS_SYN_SENT) 9806 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 9807 9808 TCP_PROBE5(send, NULL, tp, ip, tp, th); 9809 9810 error = ip_output(m, tp->t_inpcb->inp_options, &inp->inp_route, 9811 ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, 9812 inp); 9813 if (error == EMSGSIZE && inp->inp_route.ro_rt != NULL) 9814 mtu = inp->inp_route.ro_rt->rt_mtu; 9815 } 9816 #endif /* INET */ 9817 9818 out: 9819 if (lgb) { 9820 lgb->tlb_errno = error; 9821 lgb = NULL; 9822 } 9823 /* 9824 * In transmit state, time the transmission and arrange for the 9825 * retransmit. In persist state, just set snd_max. 9826 */ 9827 if (error == 0) { 9828 if (TCPS_HAVEESTABLISHED(tp->t_state) && 9829 (tp->t_flags & TF_SACK_PERMIT) && 9830 tp->rcv_numsacks > 0) 9831 tcp_clean_dsack_blocks(tp); 9832 if (len == 0) 9833 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 9834 else if (len == 1) { 9835 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 9836 } else if (len > 1) { 9837 int idx; 9838 9839 idx = (len / ctf_fixed_maxseg(tp)) + 3; 9840 if (idx >= TCP_MSS_ACCT_ATIMER) 9841 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 9842 else 9843 counter_u64_add(rack_out_size[idx], 1); 9844 } 9845 if (hw_tls && len > 0) { 9846 if (filled_all) { 9847 counter_u64_add(rack_tls_filled, 1); 9848 rack_log_type_hrdwtso(tp, rack, len, 0, orig_len, 1); 9849 } else { 9850 if (rsm) { 9851 counter_u64_add(rack_tls_rxt, 1); 9852 rack_log_type_hrdwtso(tp, rack, len, 2, orig_len, 1); 9853 } else if (doing_tlp) { 9854 counter_u64_add(rack_tls_tlp, 1); 9855 rack_log_type_hrdwtso(tp, rack, len, 3, orig_len, 1); 9856 } else if ( (ctf_outstanding(tp) + rack->r_ctl.rc_pace_min_segs) > sbavail(sb)) { 9857 counter_u64_add(rack_tls_app, 1); 9858 rack_log_type_hrdwtso(tp, rack, len, 4, orig_len, 1); 9859 } else if ((ctf_flight_size(tp, rack->r_ctl.rc_sacked) + rack->r_ctl.rc_pace_min_segs) > tp->snd_cwnd) { 9860 counter_u64_add(rack_tls_cwnd, 1); 9861 rack_log_type_hrdwtso(tp, rack, len, 5, orig_len, 1); 9862 } else if ((ctf_outstanding(tp) + rack->r_ctl.rc_pace_min_segs) > tp->snd_wnd) { 9863 counter_u64_add(rack_tls_rwnd, 1); 9864 rack_log_type_hrdwtso(tp, rack, len, 6, orig_len, 1); 9865 } else { 9866 rack_log_type_hrdwtso(tp, rack, len, 7, orig_len, 1); 9867 counter_u64_add(rack_tls_other, 1); 9868 } 9869 } 9870 } 9871 } 9872 if (sub_from_prr && (error == 0)) { 9873 if (rack->r_ctl.rc_prr_sndcnt >= len) 9874 rack->r_ctl.rc_prr_sndcnt -= len; 9875 else 9876 rack->r_ctl.rc_prr_sndcnt = 0; 9877 } 9878 sub_from_prr = 0; 9879 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, cts, 9880 pass, rsm); 9881 if ((error == 0) && 9882 (len > 0) && 9883 (tp->snd_una == tp->snd_max)) 9884 rack->r_ctl.rc_tlp_rxt_last_time = cts; 9885 if ((tp->t_flags & TF_FORCEDATA) == 0 || 9886 (rack->rc_in_persist == 0)) { 9887 tcp_seq startseq = tp->snd_nxt; 9888 9889 /* 9890 * Advance snd_nxt over sequence space of this segment. 
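 *
 * Sequence space arithmetic, for illustration (payload size
 * assumed): a 2920 byte send carrying TH_FIN advances snd_nxt by
 * 2921, since SYN and FIN each consume one sequence number on top of
 * the payload, while a pure ack advances it by zero.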
9891 */ 9892 if (error) 9893 /* We don't log or do anything with errors */ 9894 goto nomore; 9895 9896 if (flags & (TH_SYN | TH_FIN)) { 9897 if (flags & TH_SYN) 9898 tp->snd_nxt++; 9899 if (flags & TH_FIN) { 9900 tp->snd_nxt++; 9901 tp->t_flags |= TF_SENTFIN; 9902 } 9903 } 9904 /* In the ENOBUFS case we do *not* update snd_max */ 9905 if (sack_rxmit) 9906 goto nomore; 9907 9908 tp->snd_nxt += len; 9909 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 9910 if (tp->snd_una == tp->snd_max) { 9911 /* 9912 * Update the time we just added data since 9913 * none was outstanding. 9914 */ 9915 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 9916 tp->t_acktime = ticks; 9917 } 9918 tp->snd_max = tp->snd_nxt; 9919 /* 9920 * Time this transmission if not a retransmission and 9921 * not currently timing anything. 9922 * This is only relevant in case of switching back to 9923 * the base stack. 9924 */ 9925 if (tp->t_rtttime == 0) { 9926 tp->t_rtttime = ticks; 9927 tp->t_rtseq = startseq; 9928 TCPSTAT_INC(tcps_segstimed); 9929 } 9930 #ifdef NETFLIX_STATS 9931 if (!(tp->t_flags & TF_GPUTINPROG) && len) { 9932 tp->t_flags |= TF_GPUTINPROG; 9933 tp->gput_seq = startseq; 9934 tp->gput_ack = startseq + 9935 ulmin(sbavail(sb) - sb_offset, sendwin); 9936 tp->gput_ts = tcp_ts_getticks(); 9937 } 9938 #endif 9939 } 9940 } else { 9941 /* 9942 * Persist case, update snd_max but since we are in persist 9943 * mode (no window) we do not update snd_nxt. 9944 */ 9945 int32_t xlen = len; 9946 9947 if (error) 9948 goto nomore; 9949 9950 if (flags & TH_SYN) 9951 ++xlen; 9952 if (flags & TH_FIN) { 9953 ++xlen; 9954 tp->t_flags |= TF_SENTFIN; 9955 } 9956 /* In the ENOBUFS case we do *not* update snd_max */ 9957 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max)) { 9958 if (tp->snd_una == tp->snd_max) { 9959 /* 9960 * Update the time we just added data since 9961 * none was outstanding. 9962 */ 9963 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 9964 tp->t_acktime = ticks; 9965 } 9966 tp->snd_max = tp->snd_nxt + len; 9967 } 9968 } 9969 nomore: 9970 if (error) { 9971 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 9972 /* 9973 * Failures do not advance the seq counter above. For the 9974 * case of ENOBUFS we will fall out and retry in 1ms with 9975 * the hpts. Everything else will just have to retransmit 9976 * with the timer. 9977 * 9978 * In any case, we do not want to loop around for another 9979 * send without a good reason. 9980 */ 9981 sendalot = 0; 9982 switch (error) { 9983 case EPERM: 9984 tp->t_flags &= ~TF_FORCEDATA; 9985 tp->t_softerror = error; 9986 return (error); 9987 case ENOBUFS: 9988 if (slot == 0) { 9989 /* 9990 * Pace us right away to retry in a some 9991 * time 9992 */ 9993 slot = 1 + rack->rc_enobuf; 9994 if (rack->rc_enobuf < 255) 9995 rack->rc_enobuf++; 9996 if (slot > (rack->rc_rack_rtt / 2)) { 9997 slot = rack->rc_rack_rtt / 2; 9998 } 9999 if (slot < 10) 10000 slot = 10; 10001 } 10002 counter_u64_add(rack_saw_enobuf, 1); 10003 error = 0; 10004 goto enobufs; 10005 case EMSGSIZE: 10006 /* 10007 * For some reason the interface we used initially 10008 * to send segments changed to another or lowered 10009 * its MTU. If TSO was active we either got an 10010 * interface without TSO capabilits or TSO was 10011 * turned off. If we obtained mtu from ip_output() 10012 * then update it and try again. 
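 *
 * Example of that recovery (MTU values assumed): if the path MTU
 * drops from 9000 to 1500, ip_output() returns EMSGSIZE and the
 * route's rt_mtu gives us mtu = 1500; tcp_mss_update() then shrinks
 * t_maxseg to roughly 1500 minus the IP and TCP header overhead and
 * the goto below rebuilds the segment at the new size.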
10013 */ 10014 if (tso) 10015 tp->t_flags &= ~TF_TSO; 10016 if (mtu != 0) { 10017 tcp_mss_update(tp, -1, mtu, NULL, NULL); 10018 goto again; 10019 } 10020 slot = 10; 10021 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 10022 tp->t_flags &= ~TF_FORCEDATA; 10023 return (error); 10024 case ENETUNREACH: 10025 counter_u64_add(rack_saw_enetunreach, 1); 10026 case EHOSTDOWN: 10027 case EHOSTUNREACH: 10028 case ENETDOWN: 10029 if (TCPS_HAVERCVDSYN(tp->t_state)) { 10030 tp->t_softerror = error; 10031 } 10032 /* FALLTHROUGH */ 10033 default: 10034 slot = 10; 10035 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 10036 tp->t_flags &= ~TF_FORCEDATA; 10037 return (error); 10038 } 10039 } else { 10040 rack->rc_enobuf = 0; 10041 } 10042 TCPSTAT_INC(tcps_sndtotal); 10043 10044 /* 10045 * Data sent (as far as we can tell). If this advertises a larger 10046 * window than any other segment, then remember the size of the 10047 * advertised window. Any pending ACK has now been sent. 10048 */ 10049 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 10050 tp->rcv_adv = tp->rcv_nxt + recwin; 10051 tp->last_ack_sent = tp->rcv_nxt; 10052 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 10053 enobufs: 10054 rack->r_tlp_running = 0; 10055 if (flags & TH_RST) { 10056 /* 10057 * We don't send again after sending a RST. 10058 */ 10059 slot = 0; 10060 sendalot = 0; 10061 } 10062 if (rsm && (slot == 0)) { 10063 /* 10064 * Dup ack retransmission possibly, so 10065 * lets assure we have at least min rack 10066 * time, if its a rack resend then the rack 10067 * to will also be set to this. 10068 */ 10069 slot = rack->r_ctl.rc_min_to; 10070 } 10071 if (slot) { 10072 /* set the rack tcb into the slot N */ 10073 counter_u64_add(rack_paced_segments, 1); 10074 } else if (sendalot) { 10075 if (len) 10076 counter_u64_add(rack_unpaced_segments, 1); 10077 sack_rxmit = 0; 10078 tp->t_flags &= ~TF_FORCEDATA; 10079 goto again; 10080 } else if (len) { 10081 counter_u64_add(rack_unpaced_segments, 1); 10082 } 10083 tp->t_flags &= ~TF_FORCEDATA; 10084 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 10085 return (error); 10086 } 10087 10088 /* 10089 * rack_ctloutput() must drop the inpcb lock before performing copyin on 10090 * socket option arguments. When it re-acquires the lock after the copy, it 10091 * has to revalidate that the connection is still valid for the socket 10092 * option. 
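 *
 * The pattern below is: INP_WUNLOCK(), sooptcopyin(), INP_WLOCK(),
 * then bail out with ECONNRESET if the inpcb picked up INP_TIMEWAIT
 * or INP_DROPPED while unlocked, and finally reload tp and rack from
 * the inpcb before acting on the option.
 *
 * From an application the options are plain integers; a minimal
 * sketch, assuming the TCP_RACK_* constants are exposed through
 * <netinet/tcp.h> on the system in question:
 *
 *	int one = 1;
 *	if (setsockopt(s, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
 *	    &one, sizeof(one)) == -1)
 *		warn("TCP_RACK_PACE_ALWAYS");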
10093 */ 10094 static int 10095 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 10096 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 10097 { 10098 int32_t error = 0, optval; 10099 10100 switch (sopt->sopt_name) { 10101 case TCP_RACK_PROP_RATE: 10102 case TCP_RACK_PROP: 10103 case TCP_RACK_TLP_REDUCE: 10104 case TCP_RACK_EARLY_RECOV: 10105 case TCP_RACK_PACE_ALWAYS: 10106 case TCP_DELACK: 10107 case TCP_RACK_PACE_REDUCE: 10108 case TCP_RACK_PACE_MAX_SEG: 10109 case TCP_RACK_PRR_SENDALOT: 10110 case TCP_RACK_MIN_TO: 10111 case TCP_RACK_EARLY_SEG: 10112 case TCP_RACK_REORD_THRESH: 10113 case TCP_RACK_REORD_FADE: 10114 case TCP_RACK_TLP_THRESH: 10115 case TCP_RACK_PKT_DELAY: 10116 case TCP_RACK_TLP_USE: 10117 case TCP_RACK_TLP_INC_VAR: 10118 case TCP_RACK_IDLE_REDUCE_HIGH: 10119 case TCP_RACK_MIN_PACE: 10120 case TCP_RACK_GP_INCREASE: 10121 case TCP_BBR_RACK_RTT_USE: 10122 case TCP_BBR_USE_RACK_CHEAT: 10123 case TCP_RACK_DO_DETECTION: 10124 case TCP_DATA_AFTER_CLOSE: 10125 break; 10126 default: 10127 return (tcp_default_ctloutput(so, sopt, inp, tp)); 10128 break; 10129 } 10130 INP_WUNLOCK(inp); 10131 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 10132 if (error) 10133 return (error); 10134 INP_WLOCK(inp); 10135 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 10136 INP_WUNLOCK(inp); 10137 return (ECONNRESET); 10138 } 10139 tp = intotcpcb(inp); 10140 rack = (struct tcp_rack *)tp->t_fb_ptr; 10141 switch (sopt->sopt_name) { 10142 case TCP_RACK_DO_DETECTION: 10143 RACK_OPTS_INC(tcp_rack_no_sack); 10144 if (optval == 0) 10145 rack->do_detection = 0; 10146 else 10147 rack->do_detection = 1; 10148 break; 10149 case TCP_RACK_PROP_RATE: 10150 if ((optval <= 0) || (optval >= 100)) { 10151 error = EINVAL; 10152 break; 10153 } 10154 RACK_OPTS_INC(tcp_rack_prop_rate); 10155 rack->r_ctl.rc_prop_rate = optval; 10156 break; 10157 case TCP_RACK_TLP_USE: 10158 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 10159 error = EINVAL; 10160 break; 10161 } 10162 RACK_OPTS_INC(tcp_tlp_use); 10163 rack->rack_tlp_threshold_use = optval; 10164 break; 10165 case TCP_RACK_PROP: 10166 /* RACK proportional rate reduction (bool) */ 10167 RACK_OPTS_INC(tcp_rack_prop); 10168 rack->r_ctl.rc_prop_reduce = optval; 10169 break; 10170 case TCP_RACK_TLP_REDUCE: 10171 /* RACK TLP cwnd reduction (bool) */ 10172 RACK_OPTS_INC(tcp_rack_tlp_reduce); 10173 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 10174 break; 10175 case TCP_RACK_EARLY_RECOV: 10176 /* Should recovery happen early (bool) */ 10177 RACK_OPTS_INC(tcp_rack_early_recov); 10178 rack->r_ctl.rc_early_recovery = optval; 10179 break; 10180 case TCP_RACK_PACE_ALWAYS: 10181 /* Use the always pace method (bool) */ 10182 RACK_OPTS_INC(tcp_rack_pace_always); 10183 if (optval > 0) 10184 rack->rc_always_pace = 1; 10185 else 10186 rack->rc_always_pace = 0; 10187 break; 10188 case TCP_RACK_PACE_REDUCE: 10189 /* RACK Hptsi reduction factor (divisor) */ 10190 RACK_OPTS_INC(tcp_rack_pace_reduce); 10191 if (optval) 10192 /* Must be non-zero */ 10193 rack->rc_pace_reduce = optval; 10194 else 10195 error = EINVAL; 10196 break; 10197 case TCP_RACK_PACE_MAX_SEG: 10198 /* Max segments in a pace */ 10199 RACK_OPTS_INC(tcp_rack_max_seg); 10200 rack->rc_pace_max_segs = optval; 10201 rack_set_pace_segments(tp, rack); 10202 break; 10203 case TCP_RACK_PRR_SENDALOT: 10204 /* Allow PRR to send more than one seg */ 10205 RACK_OPTS_INC(tcp_rack_prr_sendalot); 10206 rack->r_ctl.rc_prr_sendalot = optval; 10207 break; 10208 case TCP_RACK_MIN_TO: 10209 /* 
Minimum time between rack t-o's in ms */ 10210 RACK_OPTS_INC(tcp_rack_min_to); 10211 rack->r_ctl.rc_min_to = optval; 10212 break; 10213 case TCP_RACK_EARLY_SEG: 10214 /* If early recovery max segments */ 10215 RACK_OPTS_INC(tcp_rack_early_seg); 10216 rack->r_ctl.rc_early_recovery_segs = optval; 10217 break; 10218 case TCP_RACK_REORD_THRESH: 10219 /* RACK reorder threshold (shift amount) */ 10220 RACK_OPTS_INC(tcp_rack_reord_thresh); 10221 if ((optval > 0) && (optval < 31)) 10222 rack->r_ctl.rc_reorder_shift = optval; 10223 else 10224 error = EINVAL; 10225 break; 10226 case TCP_RACK_REORD_FADE: 10227 /* Does reordering fade after ms time */ 10228 RACK_OPTS_INC(tcp_rack_reord_fade); 10229 rack->r_ctl.rc_reorder_fade = optval; 10230 break; 10231 case TCP_RACK_TLP_THRESH: 10232 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 10233 RACK_OPTS_INC(tcp_rack_tlp_thresh); 10234 if (optval) 10235 rack->r_ctl.rc_tlp_threshold = optval; 10236 else 10237 error = EINVAL; 10238 break; 10239 case TCP_BBR_USE_RACK_CHEAT: 10240 RACK_OPTS_INC(tcp_rack_cheat); 10241 if (optval) 10242 rack->use_rack_cheat = 1; 10243 else 10244 rack->use_rack_cheat = 0; 10245 break; 10246 case TCP_RACK_PKT_DELAY: 10247 /* RACK added ms i.e. rack-rtt + reord + N */ 10248 RACK_OPTS_INC(tcp_rack_pkt_delay); 10249 rack->r_ctl.rc_pkt_delay = optval; 10250 break; 10251 case TCP_RACK_TLP_INC_VAR: 10252 /* Does TLP include rtt variance in t-o */ 10253 error = EINVAL; 10254 break; 10255 case TCP_RACK_IDLE_REDUCE_HIGH: 10256 error = EINVAL; 10257 break; 10258 case TCP_DELACK: 10259 if (optval == 0) 10260 tp->t_delayed_ack = 0; 10261 else 10262 tp->t_delayed_ack = 1; 10263 if (tp->t_flags & TF_DELACK) { 10264 tp->t_flags &= ~TF_DELACK; 10265 tp->t_flags |= TF_ACKNOW; 10266 rack_output(tp); 10267 } 10268 break; 10269 case TCP_RACK_MIN_PACE: 10270 RACK_OPTS_INC(tcp_rack_min_pace); 10271 if (optval > 3) 10272 rack->r_enforce_min_pace = 3; 10273 else 10274 rack->r_enforce_min_pace = optval; 10275 break; 10276 case TCP_RACK_GP_INCREASE: 10277 if ((optval >= 0) && 10278 (optval <= 256)) 10279 rack->rack_per_of_gp = optval; 10280 else 10281 error = EINVAL; 10282 10283 break; 10284 case TCP_BBR_RACK_RTT_USE: 10285 if ((optval != USE_RTT_HIGH) && 10286 (optval != USE_RTT_LOW) && 10287 (optval != USE_RTT_AVG)) 10288 error = EINVAL; 10289 else 10290 rack->r_ctl.rc_rate_sample_method = optval; 10291 break; 10292 case TCP_DATA_AFTER_CLOSE: 10293 if (optval) 10294 rack->rc_allow_data_af_clo = 1; 10295 else 10296 rack->rc_allow_data_af_clo = 0; 10297 break; 10298 default: 10299 return (tcp_default_ctloutput(so, sopt, inp, tp)); 10300 break; 10301 } 10302 #ifdef NETFLIX_STATS 10303 tcp_log_socket_option(tp, sopt->sopt_name, optval, error); 10304 #endif 10305 INP_WUNLOCK(inp); 10306 return (error); 10307 } 10308 10309 static int 10310 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 10311 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 10312 { 10313 int32_t error, optval; 10314 10315 /* 10316 * Because all our options are either boolean or an int, we can just 10317 * pull everything into optval and then unlock and copy. If we ever 10318 * add a option that is not a int, then this will have quite an 10319 * impact to this routine. 
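 *
 * The matching read from an application is the mirror image (again a
 * sketch, option name assumed visible to userland):
 *
 *	int val;
 *	socklen_t optlen = sizeof(val);
 *	getsockopt(s, IPPROTO_TCP, TCP_RACK_MIN_TO, &val, &optlen);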
10320 */ 10321 error = 0; 10322 switch (sopt->sopt_name) { 10323 case TCP_RACK_DO_DETECTION: 10324 optval = rack->do_detection; 10325 break; 10326 10327 case TCP_RACK_PROP_RATE: 10328 optval = rack->r_ctl.rc_prop_rate; 10329 break; 10330 case TCP_RACK_PROP: 10331 /* RACK proportional rate reduction (bool) */ 10332 optval = rack->r_ctl.rc_prop_reduce; 10333 break; 10334 case TCP_RACK_TLP_REDUCE: 10335 /* RACK TLP cwnd reduction (bool) */ 10336 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 10337 break; 10338 case TCP_RACK_EARLY_RECOV: 10339 /* Should recovery happen early (bool) */ 10340 optval = rack->r_ctl.rc_early_recovery; 10341 break; 10342 case TCP_RACK_PACE_REDUCE: 10343 /* RACK Hptsi reduction factor (divisor) */ 10344 optval = rack->rc_pace_reduce; 10345 break; 10346 case TCP_RACK_PACE_MAX_SEG: 10347 /* Max segments in a pace */ 10348 optval = rack->rc_pace_max_segs; 10349 break; 10350 case TCP_RACK_PACE_ALWAYS: 10351 /* Use the always pace method */ 10352 optval = rack->rc_always_pace; 10353 break; 10354 case TCP_RACK_PRR_SENDALOT: 10355 /* Allow PRR to send more than one seg */ 10356 optval = rack->r_ctl.rc_prr_sendalot; 10357 break; 10358 case TCP_RACK_MIN_TO: 10359 /* Minimum time between rack t-o's in ms */ 10360 optval = rack->r_ctl.rc_min_to; 10361 break; 10362 case TCP_RACK_EARLY_SEG: 10363 /* If early recovery max segments */ 10364 optval = rack->r_ctl.rc_early_recovery_segs; 10365 break; 10366 case TCP_RACK_REORD_THRESH: 10367 /* RACK reorder threshold (shift amount) */ 10368 optval = rack->r_ctl.rc_reorder_shift; 10369 break; 10370 case TCP_RACK_REORD_FADE: 10371 /* Does reordering fade after ms time */ 10372 optval = rack->r_ctl.rc_reorder_fade; 10373 break; 10374 case TCP_BBR_USE_RACK_CHEAT: 10375 /* Do we use the rack cheat for rxt */ 10376 optval = rack->use_rack_cheat; 10377 break; 10378 case TCP_RACK_TLP_THRESH: 10379 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 10380 optval = rack->r_ctl.rc_tlp_threshold; 10381 break; 10382 case TCP_RACK_PKT_DELAY: 10383 /* RACK added ms i.e. rack-rtt + reord + N */ 10384 optval = rack->r_ctl.rc_pkt_delay; 10385 break; 10386 case TCP_RACK_TLP_USE: 10387 optval = rack->rack_tlp_threshold_use; 10388 break; 10389 case TCP_RACK_TLP_INC_VAR: 10390 /* Does TLP include rtt variance in t-o */ 10391 error = EINVAL; 10392 break; 10393 case TCP_RACK_IDLE_REDUCE_HIGH: 10394 error = EINVAL; 10395 break; 10396 case TCP_RACK_MIN_PACE: 10397 optval = rack->r_enforce_min_pace; 10398 break; 10399 case TCP_RACK_GP_INCREASE: 10400 optval = rack->rack_per_of_gp; 10401 break; 10402 case TCP_BBR_RACK_RTT_USE: 10403 optval = rack->r_ctl.rc_rate_sample_method; 10404 break; 10405 case TCP_DELACK: 10406 optval = tp->t_delayed_ack; 10407 break; 10408 case TCP_DATA_AFTER_CLOSE: 10409 optval = rack->rc_allow_data_af_clo; 10410 break; 10411 default: 10412 return (tcp_default_ctloutput(so, sopt, inp, tp)); 10413 break; 10414 } 10415 INP_WUNLOCK(inp); 10416 if (error == 0) { 10417 error = sooptcopyout(sopt, &optval, sizeof optval); 10418 } 10419 return (error); 10420 } 10421 10422 static int 10423 rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp) 10424 { 10425 int32_t error = EINVAL; 10426 struct tcp_rack *rack; 10427 10428 rack = (struct tcp_rack *)tp->t_fb_ptr; 10429 if (rack == NULL) { 10430 /* Huh? 
*/ 10431 goto out; 10432 } 10433 if (sopt->sopt_dir == SOPT_SET) { 10434 return (rack_set_sockopt(so, sopt, inp, tp, rack)); 10435 } else if (sopt->sopt_dir == SOPT_GET) { 10436 return (rack_get_sockopt(so, sopt, inp, tp, rack)); 10437 } 10438 out: 10439 INP_WUNLOCK(inp); 10440 return (error); 10441 } 10442 10443 10444 static struct tcp_function_block __tcp_rack = { 10445 .tfb_tcp_block_name = __XSTRING(STACKNAME), 10446 .tfb_tcp_output = rack_output, 10447 .tfb_do_queued_segments = ctf_do_queued_segments, 10448 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 10449 .tfb_tcp_do_segment = rack_do_segment, 10450 .tfb_tcp_ctloutput = rack_ctloutput, 10451 .tfb_tcp_fb_init = rack_init, 10452 .tfb_tcp_fb_fini = rack_fini, 10453 .tfb_tcp_timer_stop_all = rack_stopall, 10454 .tfb_tcp_timer_activate = rack_timer_activate, 10455 .tfb_tcp_timer_active = rack_timer_active, 10456 .tfb_tcp_timer_stop = rack_timer_stop, 10457 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 10458 .tfb_tcp_handoff_ok = rack_handoff_ok 10459 }; 10460 10461 static const char *rack_stack_names[] = { 10462 __XSTRING(STACKNAME), 10463 #ifdef STACKALIAS 10464 __XSTRING(STACKALIAS), 10465 #endif 10466 }; 10467 10468 static int 10469 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 10470 { 10471 memset(mem, 0, size); 10472 return (0); 10473 } 10474 10475 static void 10476 rack_dtor(void *mem, int32_t size, void *arg) 10477 { 10478 10479 } 10480 10481 static bool rack_mod_inited = false; 10482 10483 static int 10484 tcp_addrack(module_t mod, int32_t type, void *data) 10485 { 10486 int32_t err = 0; 10487 int num_stacks; 10488 10489 switch (type) { 10490 case MOD_LOAD: 10491 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 10492 sizeof(struct rack_sendmap), 10493 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 10494 10495 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 10496 sizeof(struct tcp_rack), 10497 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 10498 10499 sysctl_ctx_init(&rack_sysctl_ctx); 10500 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 10501 SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 10502 OID_AUTO, 10503 #ifdef STACKALIAS 10504 __XSTRING(STACKALIAS), 10505 #else 10506 __XSTRING(STACKNAME), 10507 #endif 10508 CTLFLAG_RW, 0, 10509 ""); 10510 if (rack_sysctl_root == NULL) { 10511 printf("Failed to add sysctl node\n"); 10512 err = EFAULT; 10513 goto free_uma; 10514 } 10515 rack_init_sysctls(); 10516 num_stacks = nitems(rack_stack_names); 10517 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 10518 rack_stack_names, &num_stacks); 10519 if (err) { 10520 printf("Failed to register %s stack name for " 10521 "%s module\n", rack_stack_names[num_stacks], 10522 __XSTRING(MODNAME)); 10523 sysctl_ctx_free(&rack_sysctl_ctx); 10524 free_uma: 10525 uma_zdestroy(rack_zone); 10526 uma_zdestroy(rack_pcb_zone); 10527 rack_counter_destroy(); 10528 printf("Failed to register rack module -- err:%d\n", err); 10529 return (err); 10530 } 10531 tcp_lro_reg_mbufq(); 10532 rack_mod_inited = true; 10533 break; 10534 case MOD_QUIESCE: 10535 err = deregister_tcp_functions(&__tcp_rack, true, false); 10536 break; 10537 case MOD_UNLOAD: 10538 err = deregister_tcp_functions(&__tcp_rack, false, true); 10539 if (err == EBUSY) 10540 break; 10541 if (rack_mod_inited) { 10542 uma_zdestroy(rack_zone); 10543 uma_zdestroy(rack_pcb_zone); 10544 sysctl_ctx_free(&rack_sysctl_ctx); 10545 rack_counter_destroy(); 10546 rack_mod_inited = false; 10547 } 10548 tcp_lro_dereg_mbufq(); 10549 err = 0; 10550 break; 10551 default: 10552 
return (EOPNOTSUPP); 10553 } 10554 return (err); 10555 } 10556 10557 static moduledata_t tcp_rack = { 10558 .name = __XSTRING(MODNAME), 10559 .evhand = tcp_addrack, 10560 .priv = 0 10561 }; 10562 10563 MODULE_VERSION(MODNAME, 1); 10564 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 10565 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 10566
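
/*
 * Usage sketch (module, stack and sysctl names assumed from stock
 * FreeBSD builds, where this file is compiled into tcp_rack.ko and
 * STACKNAME expands to "rack"):
 *
 *	kldload tcp_rack
 *	sysctl net.inet.tcp.functions_available
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * loads the module, lists the registered stacks and switches new TCP
 * connections over to the rack_output()/rack_do_segment() entry
 * points above; the MODULE_DEPEND on tcphpts pulls in the pacing
 * timer system this stack requires.
 */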