/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif	/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif	/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC 4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
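 *
 * As an illustrative sketch (not the literal code, which lives in the
 * per-state handlers further down in this file), the dispatch pattern
 * described above looks roughly like:
 *
 *	rack_do_segment()
 *	    -> verify the peer negotiated SACK, otherwise hand the
 *	       connection back to the default FreeBSD stack;
 *	    -> make sure the RACK state matches tp->t_state;
 *	    -> call the matching per-state handler, e.g.
 *	       rack_do_established(), rack_do_fin_wait_1(),
 *	       rack_do_syn_recv(), ... (prototypes below).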
 *
 */
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000 - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 1;	/* 1; */
static int32_t rack_hw_rate_min = 0;	/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;	/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Minimum timeout in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* Bit field: bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250usec */
static int32_t rack_persist_max = 2000000;	/* 2 seconds in usecs */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;	/* How many max_segs the rwnd must be before we hold off sending */
/*
 * Currently regular tcp has a rto_min of 30ms;
 * the backoff goes 12 times so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
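 * (That figure comes from the timeout doubling across the 12 backoffs:
 * 30ms * (2^0 + 2^1 + ... + 2^11) = 30ms * 4095 = 122.850 seconds.)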
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usecs */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;	/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250% slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200% congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200% of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;	/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;	/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;	/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;	/* What is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;	/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combining these two gives the range of 'no change' to bw */
/* ie the up/down provide the upper and lower bound */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;	/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;	/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;	/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;	/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;	/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;

/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_calc_zero;
counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_per_timer_hole;
counter_u64_t rack_large_ackcmp;
counter_u64_t rack_small_ackcmp;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
counter_u64_t rack_persists_lost_ends;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

#define MAX_NUM_OF_CNTS 13
counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS];
counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_used_tlpmethod;
counter_u64_t rack_used_tlpmethod2;
counter_u64_t rack_enter_tlp_calc;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_tlp_does_nada;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;
counter_u64_t rack_sbsndptr_right;
counter_u64_t rack_sbsndptr_wrong;

/* Temp CPU counters */
counter_u64_t rack_find_high;

counter_u64_t rack_progress_drops;
counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];

#define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;					\
	if ((u_long)(tv) < (u_long)(tvmin))			\
		(tv) = (tvmin);					\
	if ((u_long)(tv) > (u_long)(tvmax))			\
		(tv) = (tvmax);					\
} while (0)

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t *ofia, int32_t thflags, int32_t *ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp,
    uint32_t type, uint32_t ack);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);

static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);

static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line, uint8_t quality);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t delta);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct newreno old, *ptr;
	struct tcpcb *tp;
	int error;

	if (rack->rc_pacing_cc_set)
		return;

	tp = rack->rc_tp;
	if (tp->cc_algo == NULL) {
		/* Tcb is leaving */
		printf("No cc algorithm?\n");
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, we can't play games with beta! */
		goto out;
	}
	ptr = ((struct newreno *)tp->ccv->cc_data);
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, why does new_reno no longer have a set function? */
		goto out;
	}
	if (ptr == NULL) {
		/* Just the default values */
		old.beta = V_newreno_beta;
		old.beta_ecn = V_newreno_beta_ecn;
		old.newreno_flags = 0;
	} else {
		old.beta = ptr->beta;
		old.beta_ecn = ptr->beta_ecn;
		old.newreno_flags = ptr->newreno_flags;
	}
	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
	sopt.sopt_dir = SOPT_SET;
	opt.name = CC_NEWRENO_BETA;
	opt.val = rack->r_ctl.rc_saved_beta.beta;
	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
	if (error) {
		goto out;
	}
	/*
	 * Hack alert: we need to set in our newreno_flags
	 * so that ABE behavior is also applied.
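	 * (ABE is Alternative Backoff with ECN, RFC 8511: with the flag
	 * set, new-reno backs off with beta_ecn rather than beta when the
	 * congestion signal is ECN-based.)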
	 */
	((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
	if (error) {
		goto out;
	}
	/* Save off the original values for restoral */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		ptr = ((struct newreno *)tp->ccv->cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		if (ptr) {
			log.u_bbr.flex1 = ptr->beta;
			log.u_bbr.flex2 = ptr->beta_ecn;
			log.u_bbr.flex3 = ptr->newreno_flags;
		}
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = 3;
		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
		    0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
	struct newreno old, *ptr;
	struct tcpcb *tp;

	if (rack->rc_pacing_cc_set == 0)
		return;
	tp = rack->rc_tp;
	rack->rc_pacing_cc_set = 0;
	if (tp->cc_algo == NULL)
		/* Tcb is leaving */
		return;
	if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, nothing to do! */
		return;
	}
	ptr = ((struct newreno *)tp->ccv->cc_data);
	if (ptr == NULL) {
		/*
		 * This happens at rack_fini() if the
		 * cc module gets freed on us. In that
		 * case we lose our "new" settings but
		 * that's ok, since the tcb is going away anyway.
		 */
		return;
	}
	/* Grab out our set values */
	memcpy(&old, ptr, sizeof(struct newreno));
	/* Copy back in the original values */
	memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno));
	/* Now save back the values we had set in (for when pacing is restored) */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		ptr = ((struct newreno *)tp->ccv->cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = ptr->beta;
		log.u_bbr.flex2 = ptr->beta_ecn;
		log.u_bbr.flex3 = ptr->newreno_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = 4;
		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
		    0, &log, false, NULL, NULL, 0, &tv);
	}
}

#ifdef NETFLIX_PEAKRATE
static inline void
rack_update_peakrate_thr(struct tcpcb *tp)
{
	/* Keep in mind that t_maxpeakrate is in B/s.
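	 * The threshold computed below is therefore roughly one smoothed
	 * RTT's worth of data at the peak rate, i.e.
	 * max(2 * t_maxseg, t_maxpeakrate * t_srtt / HPTS_USEC_IN_SEC),
	 * clamped to UINT32_MAX so it fits in t_peakrate_thr.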
	 */
	uint64_t peak;
	peak = uqmax((tp->t_maxseg * 2),
	    (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
	tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
}
#endif

static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;
	int i;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return error;

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
		counter_u64_zero(rack_badfr);
		counter_u64_zero(rack_badfr_bytes);
		counter_u64_zero(rack_rtm_prr_retran);
		counter_u64_zero(rack_rtm_prr_newdata);
		counter_u64_zero(rack_timestamp_mismatch);
		counter_u64_zero(rack_reorder_seen);
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_tlp_retran_fail);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_to_arm_rack);
		counter_u64_zero(rack_to_arm_tlp);
		counter_u64_zero(rack_paced_segments);
		counter_u64_zero(rack_calc_zero);
		counter_u64_zero(rack_calc_nonzero);
		counter_u64_zero(rack_unpaced_segments);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enobuf_hw);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_per_timer_hole);
		counter_u64_zero(rack_large_ackcmp);
		counter_u64_zero(rack_small_ackcmp);
		counter_u64_zero(rack_persists_sends);
		counter_u64_zero(rack_persists_acks);
		counter_u64_zero(rack_persists_loss);
		counter_u64_zero(rack_persists_lost_ends);
#ifdef INVARIANTS
		counter_u64_zero(rack_adjust_map_bw);
#endif
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_fto_send);
		counter_u64_zero(rack_fto_rsm_send);
		counter_u64_zero(rack_extended_rfo);
		counter_u64_zero(rack_hw_pace_init_fail);
		counter_u64_zero(rack_hw_pace_lost);
		counter_u64_zero(rack_sbsndptr_wrong);
		counter_u64_zero(rack_sbsndptr_right);
		counter_u64_zero(rack_non_fto_send);
		counter_u64_zero(rack_nfto_resend);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_to_alloc_limited);
		counter_u64_zero(rack_alloc_limited_conns);
		counter_u64_zero(rack_split_limited);
		for (i = 0; i < MAX_NUM_OF_CNTS; i++) {
			counter_u64_zero(rack_proc_comp_ack[i]);
		}
		counter_u64_zero(rack_multi_single_eq);
		counter_u64_zero(rack_proc_non_comp_ack);
		counter_u64_zero(rack_find_high);
		counter_u64_zero(rack_sack_attacks_detected);
		counter_u64_zero(rack_sack_attacks_reversed);
		counter_u64_zero(rack_sack_used_next_merge);
		counter_u64_zero(rack_sack_used_prev_merge);
		counter_u64_zero(rack_sack_splits);
		counter_u64_zero(rack_sack_skipped_acked);
		counter_u64_zero(rack_ack_total);
		counter_u64_zero(rack_express_sack);
		counter_u64_zero(rack_sack_total);
		counter_u64_zero(rack_move_none);
		counter_u64_zero(rack_move_some);
		counter_u64_zero(rack_used_tlpmethod);
		counter_u64_zero(rack_used_tlpmethod2);
		counter_u64_zero(rack_enter_tlp_calc);
		counter_u64_zero(rack_progress_drops);
		counter_u64_zero(rack_tlp_does_nada);
		counter_u64_zero(rack_try_scwnd);
		counter_u64_zero(rack_collapsed_win);
	}
	rack_clear_counter = 0;
	return (0);
}

static void
rack_init_sysctls(void)
{
	int i;
	struct sysctl_oid *rack_counters;
	struct sysctl_oid *rack_attack;
	struct sysctl_oid *rack_pacing;
	struct sysctl_oid *rack_timely;
	struct sysctl_oid *rack_timers;
	struct sysctl_oid *rack_tlp;
	struct sysctl_oid *rack_misc;
	struct sysctl_oid *rack_features;
	struct sysctl_oid *rack_measure;
	struct sysctl_oid *rack_probertt;
	struct sysctl_oid *rack_hw_pacing;

	rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "sack_attack",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Sack Attack Counters and Controls");
	rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "stats",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Counters");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
	    &rack_rate_sample_method, USE_RTT_LOW,
	    "What method should we use for rate sampling 0=high, 1=low");
	/* Probe rtt related controls */
	rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "probertt",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "ProbeRTT related Controls");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
	    &rack_atexit_prtt_hbp, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
	    &rack_atexit_prtt, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_mul", CTLFLAG_RW,
	    &rack_per_of_gp_probertt, 60,
	    "What percentage of goodput do we pace at in probertt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
	    &rack_per_of_gp_probertt_reduce, 10,
	    "What percentage of goodput do we reduce every gp_srtt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_low", CTLFLAG_RW,
	    &rack_per_of_gp_lowthresh, 40,
	    "What percentage of goodput do we allow the multiplier to fall to");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "time_between", CTLFLAG_RW,
	    &rack_time_between_probertt, 96000000,
	    "How many useconds between the lowest rtt falling must pass before we enter probertt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "safety", CTLFLAG_RW,
	    &rack_probe_rtt_safety_val, 2000000,
	    "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "sets_cwnd", CTLFLAG_RW,
	    &rack_probe_rtt_sets_cwnd, 0,
	    "Do we set the cwnd too (if always_lower is on)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
	    &rack_max_drain_wait, 2,
	    "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
	    &rack_must_drain, 1,
	    "We must drain this many gp_srtt's waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_entry, 1,
	    "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_exit, 0,
	    "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_div", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_div, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (bottom of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_mul", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_mul, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (top of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
	    &rack_min_probertt_hold, 200000,
	    "What is the minimum time we hold probertt at target");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "filter_life", CTLFLAG_RW,
	    &rack_probertt_filter_life, 10000000,
	    "What is the time for the filter's life in useconds");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "lower_within", CTLFLAG_RW,
	    &rack_probertt_lower_within, 10,
	    "If the rtt goes lower within this percentage of the time, go into probe-rtt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "must_move", CTLFLAG_RW,
	    &rack_min_rtt_movement, 250,
	    "How much is the minimum movement in rtt to count as a drop for probertt purposes");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
	    &rack_probertt_clear_is, 1,
	    "Do we clear I/S counts on exiting probe-rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
	    &rack_max_drain_hbp, 1,
	    "How many extra drain gpsrtt's do we get in highly buffered paths");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "hbp_threshold", CTLFLAG_RW,
	    &rack_hbp_thresh, 3,
	    "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
	/* Pacing related sysctls */
	rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "pacing",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Pacing related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "max_pace_over", CTLFLAG_RW,
	    &rack_max_per_above, 30,
	    "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "pace_to_one", CTLFLAG_RW,
	    &rack_pace_one_seg, 0,
	    "Do we allow low b/w pacing of 1MSS instead of two");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
	    &rack_limit_time_with_srtt, 0,
	    "Do we limit pacing time based on srtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "init_win", CTLFLAG_RW,
	    &rack_default_init_window, 0,
	    "Do we have a rack initial window 0 = system default");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_ss", CTLFLAG_RW,
	    &rack_per_of_gp_ss, 250,
	    "If non zero, what percentage of goodput to pace at in slow start");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_ca", CTLFLAG_RW,
	    &rack_per_of_gp_ca, 150,
	    "If non zero, what percentage of goodput to pace at in congestion avoidance");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_rec", CTLFLAG_RW,
	    &rack_per_of_gp_rec, 200,
	    "If non zero, what percentage of goodput to pace at in recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "pace_max_seg", CTLFLAG_RW,
	    &rack_hptsi_segments, 40,
	    "What size is the max for TSO segments in pacing and burst mitigation");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "burst_reduces", CTLFLAG_RW,
	    &rack_slot_reduction, 4,
	    "When doing only burst mitigation what is the reduce divisor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "use_pacing", CTLFLAG_RW,
	    &rack_pace_every_seg, 0,
	    "If set we use pacing, if clear we use only the original burst mitigation");
	SYSCTL_ADD_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_bw_rate_cap, 0,
	    "If set we apply this value to the absolute rate cap used by pacing");
	SYSCTL_ADD_U8(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
	    &rack_req_measurements, 1,
	    "If doing dynamic pacing, how many measurements must be in before we start pacing?");
	/* Hardware pacing */
	rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "hdwr_pacing",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Pacing related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rwnd_factor", CTLFLAG_RW,
	    &rack_hw_rwnd_factor, 2,
	    "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
	    &rack_enobuf_hw_boost_mult, 2,
	    "By how many time_betweens should we boost the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
	    &rack_enobuf_hw_max, 2,
	    "What is the max boost to the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
	    &rack_enobuf_hw_min, 2,
	    "What is the min boost to the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "enable", CTLFLAG_RW,
	    &rack_enable_hw_pacing, 0,
	    "Should RACK attempt to use hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_hw_rate_caps, 1,
	    "Does the highest hardware pacing rate cap the rate we will send at?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_min", CTLFLAG_RW,
	    &rack_hw_rate_min, 0,
	    "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_to_low", CTLFLAG_RW,
	    &rack_hw_rate_to_low, 0,
	    "If we fall below this rate, dis-engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "up_only", CTLFLAG_RW,
	    &rack_hw_up_only, 1,
	    "Do we allow hw pacing to lower the rate selected?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
	    &rack_hw_pace_extra_slots, 2,
	    "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
	rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timely",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Timely RTT Controls");
	/* Timely based GP dynamics */
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upper", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_up, 2,
	    "Rack timely upper range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lower", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_down, 4,
	    "Rack timely lower range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
	    &rack_gp_rtt_maxmul, 3,
	    "Rack timely multiplier of lowest rtt for rtt_max");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_div", CTLFLAG_RW,
	    &rack_gp_rtt_mindiv, 4,
	    "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
	    &rack_gp_rtt_minmul, 1,
	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "decrease", CTLFLAG_RW,
	    &rack_gp_decrease_per, 20,
	    "Rack timely decrease percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "increase", CTLFLAG_RW,
	    &rack_gp_increase_per, 2,
	    "Rack timely increase percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lowerbound", CTLFLAG_RW,
	    &rack_per_lower_bound, 50,
	    "Rack timely lowest percentage we allow GP multiplier to fall to");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundss", CTLFLAG_RW,
	    &rack_per_upper_bound_ss, 0,
	    "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundca", CTLFLAG_RW,
	    &rack_per_upper_bound_ca, 0,
	    "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dynamicgp", CTLFLAG_RW,
	    &rack_do_dyn_mul, 0,
	    "Rack timely do we enable dynamic timely goodput by default");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "no_rec_red", CTLFLAG_RW,
	    &rack_gp_no_rec_chg, 1,
	    "Rack timely do we prohibit the recovery multiplier from being lowered");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
	    &rack_timely_dec_clear, 6,
	    "Rack timely what threshold do we count to before another boost during b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_rise", CTLFLAG_RW,
	    &rack_timely_max_push_rise, 3,
	    "Rack timely how many times do we push up with b/w increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_drop", CTLFLAG_RW,
	    &rack_timely_max_push_drop, 3,
	    "Rack timely how many times do we push back on b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "min_segs", CTLFLAG_RW,
	    &rack_timely_min_segs, 4,
	    "Rack timely when setting the cwnd what is the min num segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "noback_max", CTLFLAG_RW,
	    &rack_use_max_for_nobackoff, 0,
	    "Rack timely when deciding whether to back off on a loss, do we use under max rtt else min");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "interim_timely_only", CTLFLAG_RW,
	    &rack_timely_int_timely_only, 0,
	    "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "nonstop", CTLFLAG_RW,
	    &rack_timely_no_stopping, 0,
	    "Rack timely don't stop increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
	    &rack_down_raise_thresh, 100,
	    "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
	    &rack_req_segs, 1,
	    "Bottom dragging if not these many segments outstanding and room");

	/* TLP and Rack related parameters */
	rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "tlp",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "TLP and Rack related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_rrr", CTLFLAG_RW,
	    &use_rack_rr, 1,
	    "Do we use Rack Rapid Recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "post_rec_labc", CTLFLAG_RW,
	    &rack_max_abc_post_recovery, 2,
	    "Since we do early recovery, do we override the l_abc to a value, if so what?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
	    &rack_non_rxt_use_cr, 0,
	    "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpmethod", CTLFLAG_RW,
	    &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
	    "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "limit", CTLFLAG_RW,
	    &rack_tlp_limit, 2,
	    "How many TLP's can be sent without sending new data");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_greater", CTLFLAG_RW,
	    &rack_tlp_use_greater, 1,
	    "Should we use the rack_rtt time if it's greater than srtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpminto", CTLFLAG_RW,
	    &rack_tlp_min, 10000,
	    "TLP minimum timeout per the specification (in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "send_oldest", CTLFLAG_RW,
	    &rack_always_send_oldest, 0,
	    "Should we always send the oldest TLP and RACK-TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rack_tlimit", CTLFLAG_RW,
	    &rack_limited_retran, 0,
	    "How many times can a rack timeout drive out sends");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
	    &rack_lower_cwnd_at_tlp, 0,
	    "When a TLP completes a retran should we enter recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
	    &rack_reorder_thresh, 2,
	    "What factor for rack will be added when seeing reordering (shift right)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
	    &rack_tlp_thresh, 1,
	    "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
	    &rack_reorder_fade, 60000000,
	    "Does reorder detection fade, if so how many microseconds (0 means never)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "pktdelay", CTLFLAG_RW,
	    &rack_pkt_delay, 1000,
	    "Extra RACK time (in microseconds) besides reordering thresh");

	/* Timer related controls */
	rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timers",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Timer related controls");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmin", CTLFLAG_RW,
	    &rack_persist_min, 250000,
	    "What is the minimum time in microseconds between persists");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmax", CTLFLAG_RW,
	    &rack_persist_max, 2000000,
	    "What is the largest delay in microseconds between persists");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
	    &rack_delayed_ack_time, 40000,
	    "Delayed ack time (40ms in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minrto", CTLFLAG_RW,
	    &rack_rto_min, 30000,
	    "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "maxrto", CTLFLAG_RW,
	    &rack_rto_max, 4000000,
	    "Maximum RTO in microseconds -- should be at least as large as min_rto");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minto", CTLFLAG_RW,
	    &rack_min_to, 1000,
	    "Minimum rack timeout in microseconds");
microseconds"); 1328 /* Measure controls */ 1329 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1330 SYSCTL_CHILDREN(rack_sysctl_root), 1331 OID_AUTO, 1332 "measure", 1333 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1334 "Measure related controls"); 1335 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1336 SYSCTL_CHILDREN(rack_measure), 1337 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1338 &rack_wma_divisor, 8, 1339 "When doing b/w calculation what is the divisor for the WMA"); 1340 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1341 SYSCTL_CHILDREN(rack_measure), 1342 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1343 &rack_cwnd_block_ends_measure, 0, 1344 "Does a cwnd just-return end the measurement window (app limited)"); 1345 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1346 SYSCTL_CHILDREN(rack_measure), 1347 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1348 &rack_rwnd_block_ends_measure, 0, 1349 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1350 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1351 SYSCTL_CHILDREN(rack_measure), 1352 OID_AUTO, "min_target", CTLFLAG_RW, 1353 &rack_def_data_window, 20, 1354 "What is the minimum target window (in mss) for a GP measurements"); 1355 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1356 SYSCTL_CHILDREN(rack_measure), 1357 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1358 &rack_goal_bdp, 2, 1359 "What is the goal BDP to measure"); 1360 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1361 SYSCTL_CHILDREN(rack_measure), 1362 OID_AUTO, "min_srtts", CTLFLAG_RW, 1363 &rack_min_srtts, 1, 1364 "What is the goal BDP to measure"); 1365 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1366 SYSCTL_CHILDREN(rack_measure), 1367 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1368 &rack_min_measure_usec, 0, 1369 "What is the Minimum time time for a measurement if 0, this is off"); 1370 /* Features */ 1371 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1372 SYSCTL_CHILDREN(rack_sysctl_root), 1373 OID_AUTO, 1374 "features", 1375 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1376 "Feature controls"); 1377 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1378 SYSCTL_CHILDREN(rack_features), 1379 OID_AUTO, "cmpack", CTLFLAG_RW, 1380 &rack_use_cmp_acks, 1, 1381 "Should RACK have LRO send compressed acks"); 1382 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1383 SYSCTL_CHILDREN(rack_features), 1384 OID_AUTO, "fsb", CTLFLAG_RW, 1385 &rack_use_fsb, 1, 1386 "Should RACK use the fast send block?"); 1387 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1388 SYSCTL_CHILDREN(rack_features), 1389 OID_AUTO, "rfo", CTLFLAG_RW, 1390 &rack_use_rfo, 1, 1391 "Should RACK use rack_fast_output()?"); 1392 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1393 SYSCTL_CHILDREN(rack_features), 1394 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1395 &rack_use_rsm_rfo, 1, 1396 "Should RACK use rack_fast_rsm_output()?"); 1397 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1398 SYSCTL_CHILDREN(rack_features), 1399 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1400 &rack_enable_mqueue_for_nonpaced, 0, 1401 "Should RACK use mbuf queuing for non-paced connections"); 1402 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1403 SYSCTL_CHILDREN(rack_features), 1404 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1405 &rack_do_hystart, 0, 1406 "Should RACK enable HyStart++ on connections?"); 1407 /* Misc rack controls */ 1408 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1409 SYSCTL_CHILDREN(rack_sysctl_root), 1410 OID_AUTO, 1411 "misc", 1412 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1413 "Misc related controls"); 1414 #ifdef TCP_ACCOUNTING 1415 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1416 SYSCTL_CHILDREN(rack_misc), 1417 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1418 &rack_tcp_accounting, 0, 1419 "Should we turn on TCP accounting for all rack sessions?"); 
1420 #endif 1421 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1422 SYSCTL_CHILDREN(rack_misc), 1423 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1424 &rack_apply_rtt_with_reduced_conf, 0, 1425 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1426 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1427 SYSCTL_CHILDREN(rack_misc), 1428 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1429 &rack_dsack_std_based, 3, 1430 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1431 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1432 SYSCTL_CHILDREN(rack_misc), 1433 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1434 &rack_prr_addbackmax, 2, 1435 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1436 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1437 SYSCTL_CHILDREN(rack_misc), 1438 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1439 &rack_stats_gets_ms_rtt, 1, 1440 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1441 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1442 SYSCTL_CHILDREN(rack_misc), 1443 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1444 &rack_client_low_buf, 0, 1445 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1446 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1447 SYSCTL_CHILDREN(rack_misc), 1448 OID_AUTO, "defprofile", CTLFLAG_RW, 1449 &rack_def_profile, 0, 1450 "Should RACK use a default profile (0=no, num == profile num)?"); 1451 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1452 SYSCTL_CHILDREN(rack_misc), 1453 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1454 &rack_enable_shared_cwnd, 1, 1455 "Should RACK try to use the shared cwnd on connections where allowed"); 1456 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1457 SYSCTL_CHILDREN(rack_misc), 1458 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1459 &rack_limits_scwnd, 1, 1460 "Should RACK place low end time limits on the shared cwnd feature"); 1461 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1462 SYSCTL_CHILDREN(rack_misc), 1463 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1464 &rack_use_imac_dack, 0, 1465 "Should RACK try to emulate iMac delayed ack"); 1466 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1467 SYSCTL_CHILDREN(rack_misc), 1468 OID_AUTO, "no_prr", CTLFLAG_RW, 1469 &rack_disable_prr, 0, 1470 "Should RACK not use prr and only pace (must have pacing on)"); 1471 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1472 SYSCTL_CHILDREN(rack_misc), 1473 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1474 &rack_verbose_logging, 0, 1475 "Should RACK black box logging be verbose"); 1476 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1477 SYSCTL_CHILDREN(rack_misc), 1478 OID_AUTO, "data_after_close", CTLFLAG_RW, 1479 &rack_ignore_data_after_close, 1, 1480 "Do we hold off sending a RST until all pending data is ack'd"); 1481 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1482 SYSCTL_CHILDREN(rack_misc), 1483 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1484 &rack_sack_not_required, 1, 1485 "Do we allow rack to run on connections not supporting SACK"); 1486 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1487 SYSCTL_CHILDREN(rack_misc), 1488 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1489 &rack_send_a_lot_in_prr, 1, 1490 "Send a lot in prr"); 1491 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1492 SYSCTL_CHILDREN(rack_misc), 1493 OID_AUTO, "autoscale", CTLFLAG_RW, 1494 &rack_autosndbuf_inc, 20, 1495 "What percentage should rack scale up its snd buffer by?"); 1496 /* Sack Attacker detection stuff */ 1497 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1498 SYSCTL_CHILDREN(rack_attack), 1499 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1500 &rack_highest_sack_thresh_seen, 
0, 1501 "Highest sack to ack ratio seen"); 1502 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1503 SYSCTL_CHILDREN(rack_attack), 1504 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1505 &rack_highest_move_thresh_seen, 0, 1506 "Highest move to non-move ratio seen"); 1507 rack_ack_total = counter_u64_alloc(M_WAITOK); 1508 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1509 SYSCTL_CHILDREN(rack_attack), 1510 OID_AUTO, "acktotal", CTLFLAG_RD, 1511 &rack_ack_total, 1512 "Total number of Ack's"); 1513 rack_express_sack = counter_u64_alloc(M_WAITOK); 1514 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1515 SYSCTL_CHILDREN(rack_attack), 1516 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1517 &rack_express_sack, 1518 "Total expresss number of Sack's"); 1519 rack_sack_total = counter_u64_alloc(M_WAITOK); 1520 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1521 SYSCTL_CHILDREN(rack_attack), 1522 OID_AUTO, "sacktotal", CTLFLAG_RD, 1523 &rack_sack_total, 1524 "Total number of SACKs"); 1525 rack_move_none = counter_u64_alloc(M_WAITOK); 1526 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1527 SYSCTL_CHILDREN(rack_attack), 1528 OID_AUTO, "move_none", CTLFLAG_RD, 1529 &rack_move_none, 1530 "Total number of SACK index reuse of postions under threshold"); 1531 rack_move_some = counter_u64_alloc(M_WAITOK); 1532 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1533 SYSCTL_CHILDREN(rack_attack), 1534 OID_AUTO, "move_some", CTLFLAG_RD, 1535 &rack_move_some, 1536 "Total number of SACK index reuse of postions over threshold"); 1537 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1538 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1539 SYSCTL_CHILDREN(rack_attack), 1540 OID_AUTO, "attacks", CTLFLAG_RD, 1541 &rack_sack_attacks_detected, 1542 "Total number of SACK attackers that had sack disabled"); 1543 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1544 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1545 SYSCTL_CHILDREN(rack_attack), 1546 OID_AUTO, "reversed", CTLFLAG_RD, 1547 &rack_sack_attacks_reversed, 1548 "Total number of SACK attackers that were later determined false positive"); 1549 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1550 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1551 SYSCTL_CHILDREN(rack_attack), 1552 OID_AUTO, "nextmerge", CTLFLAG_RD, 1553 &rack_sack_used_next_merge, 1554 "Total number of times we used the next merge"); 1555 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1556 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1557 SYSCTL_CHILDREN(rack_attack), 1558 OID_AUTO, "prevmerge", CTLFLAG_RD, 1559 &rack_sack_used_prev_merge, 1560 "Total number of times we used the prev merge"); 1561 /* Counters */ 1562 rack_fto_send = counter_u64_alloc(M_WAITOK); 1563 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1564 SYSCTL_CHILDREN(rack_counters), 1565 OID_AUTO, "fto_send", CTLFLAG_RD, 1566 &rack_fto_send, "Total number of rack_fast_output sends"); 1567 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1568 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1569 SYSCTL_CHILDREN(rack_counters), 1570 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1571 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1572 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1574 SYSCTL_CHILDREN(rack_counters), 1575 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1576 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1577 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1578 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_counters), 1580 OID_AUTO, "nfto_send", CTLFLAG_RD, 1581 &rack_non_fto_send, 
"Total number of rack_output first sends"); 1582 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1583 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1584 SYSCTL_CHILDREN(rack_counters), 1585 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1586 &rack_extended_rfo, "Total number of times we extended rfo"); 1587 1588 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1589 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1590 SYSCTL_CHILDREN(rack_counters), 1591 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1592 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1593 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1594 1595 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1596 SYSCTL_CHILDREN(rack_counters), 1597 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1598 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1599 rack_badfr = counter_u64_alloc(M_WAITOK); 1600 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1601 SYSCTL_CHILDREN(rack_counters), 1602 OID_AUTO, "badfr", CTLFLAG_RD, 1603 &rack_badfr, "Total number of bad FRs"); 1604 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 1605 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1606 SYSCTL_CHILDREN(rack_counters), 1607 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 1608 &rack_badfr_bytes, "Total number of bad FRs"); 1609 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 1610 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1611 SYSCTL_CHILDREN(rack_counters), 1612 OID_AUTO, "prrsndret", CTLFLAG_RD, 1613 &rack_rtm_prr_retran, 1614 "Total number of prr based retransmits"); 1615 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 1616 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1617 SYSCTL_CHILDREN(rack_counters), 1618 OID_AUTO, "prrsndnew", CTLFLAG_RD, 1619 &rack_rtm_prr_newdata, 1620 "Total number of prr based new transmits"); 1621 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 1622 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1623 SYSCTL_CHILDREN(rack_counters), 1624 OID_AUTO, "tsnf", CTLFLAG_RD, 1625 &rack_timestamp_mismatch, 1626 "Total number of timestamps that we could not find the reported ts"); 1627 rack_find_high = counter_u64_alloc(M_WAITOK); 1628 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1629 SYSCTL_CHILDREN(rack_counters), 1630 OID_AUTO, "findhigh", CTLFLAG_RD, 1631 &rack_find_high, 1632 "Total number of FIN causing find-high"); 1633 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 1634 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1635 SYSCTL_CHILDREN(rack_counters), 1636 OID_AUTO, "reordering", CTLFLAG_RD, 1637 &rack_reorder_seen, 1638 "Total number of times we added delay due to reordering"); 1639 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1640 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1641 SYSCTL_CHILDREN(rack_counters), 1642 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1643 &rack_tlp_tot, 1644 "Total number of tail loss probe expirations"); 1645 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1646 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1647 SYSCTL_CHILDREN(rack_counters), 1648 OID_AUTO, "tlp_new", CTLFLAG_RD, 1649 &rack_tlp_newdata, 1650 "Total number of tail loss probe sending new data"); 1651 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1652 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1653 SYSCTL_CHILDREN(rack_counters), 1654 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1655 &rack_tlp_retran, 1656 "Total number of tail loss probe sending retransmitted data"); 1657 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1658 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1659 SYSCTL_CHILDREN(rack_counters), 1660 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1661 
&rack_tlp_retran_bytes, 1662 "Total bytes of tail loss probe sending retransmitted data"); 1663 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 1664 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1665 SYSCTL_CHILDREN(rack_counters), 1666 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 1667 &rack_tlp_retran_fail, 1668 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 1669 rack_to_tot = counter_u64_alloc(M_WAITOK); 1670 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1671 SYSCTL_CHILDREN(rack_counters), 1672 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1673 &rack_to_tot, 1674 "Total number of times the rack to expired"); 1675 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 1676 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1677 SYSCTL_CHILDREN(rack_counters), 1678 OID_AUTO, "arm_rack", CTLFLAG_RD, 1679 &rack_to_arm_rack, 1680 "Total number of times the rack timer armed"); 1681 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 1682 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1683 SYSCTL_CHILDREN(rack_counters), 1684 OID_AUTO, "arm_tlp", CTLFLAG_RD, 1685 &rack_to_arm_tlp, 1686 "Total number of times the tlp timer armed"); 1687 rack_calc_zero = counter_u64_alloc(M_WAITOK); 1688 rack_calc_nonzero = counter_u64_alloc(M_WAITOK); 1689 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1690 SYSCTL_CHILDREN(rack_counters), 1691 OID_AUTO, "calc_zero", CTLFLAG_RD, 1692 &rack_calc_zero, 1693 "Total number of times pacing time worked out to zero"); 1694 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1695 SYSCTL_CHILDREN(rack_counters), 1696 OID_AUTO, "calc_nonzero", CTLFLAG_RD, 1697 &rack_calc_nonzero, 1698 "Total number of times pacing time worked out to non-zero"); 1699 rack_paced_segments = counter_u64_alloc(M_WAITOK); 1700 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1701 SYSCTL_CHILDREN(rack_counters), 1702 OID_AUTO, "paced", CTLFLAG_RD, 1703 &rack_paced_segments, 1704 "Total number of times a segment send caused hptsi"); 1705 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 1706 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1707 SYSCTL_CHILDREN(rack_counters), 1708 OID_AUTO, "unpaced", CTLFLAG_RD, 1709 &rack_unpaced_segments, 1710 "Total number of times a segment did not cause hptsi"); 1711 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1712 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1713 SYSCTL_CHILDREN(rack_counters), 1714 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1715 &rack_saw_enobuf, 1716 "Total number of times a sends returned enobuf for non-hdwr paced connections"); 1717 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1718 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1719 SYSCTL_CHILDREN(rack_counters), 1720 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1721 &rack_saw_enobuf_hw, 1722 "Total number of times a send returned enobuf for hdwr paced connections"); 1723 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1724 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1725 SYSCTL_CHILDREN(rack_counters), 1726 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1727 &rack_saw_enetunreach, 1728 "Total number of times a send received a enetunreachable"); 1729 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1730 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1731 SYSCTL_CHILDREN(rack_counters), 1732 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1733 &rack_hot_alloc, 1734 "Total allocations from the top of our list"); 1735 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1736 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1737 SYSCTL_CHILDREN(rack_counters), 1738 OID_AUTO, "allocs", CTLFLAG_RD, 1739 &rack_to_alloc, 1740 "Total allocations of tracking structures"); 1741 
rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1742 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1743 SYSCTL_CHILDREN(rack_counters), 1744 OID_AUTO, "allochard", CTLFLAG_RD, 1745 &rack_to_alloc_hard, 1746 "Total allocations done with sleeping the hard way"); 1747 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1748 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1749 SYSCTL_CHILDREN(rack_counters), 1750 OID_AUTO, "allocemerg", CTLFLAG_RD, 1751 &rack_to_alloc_emerg, 1752 "Total allocations done from emergency cache"); 1753 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1754 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1755 SYSCTL_CHILDREN(rack_counters), 1756 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1757 &rack_to_alloc_limited, 1758 "Total allocations dropped due to limit"); 1759 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1760 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1761 SYSCTL_CHILDREN(rack_counters), 1762 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1763 &rack_alloc_limited_conns, 1764 "Connections with allocations dropped due to limit"); 1765 rack_split_limited = counter_u64_alloc(M_WAITOK); 1766 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1767 SYSCTL_CHILDREN(rack_counters), 1768 OID_AUTO, "split_limited", CTLFLAG_RD, 1769 &rack_split_limited, 1770 "Split allocations dropped due to limit"); 1771 1772 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 1773 char name[32]; 1774 sprintf(name, "cmp_ack_cnt_%d", i); 1775 rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK); 1776 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1777 SYSCTL_CHILDREN(rack_counters), 1778 OID_AUTO, name, CTLFLAG_RD, 1779 &rack_proc_comp_ack[i], 1780 "Number of compressed acks we processed"); 1781 } 1782 rack_large_ackcmp = counter_u64_alloc(M_WAITOK); 1783 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1784 SYSCTL_CHILDREN(rack_counters), 1785 OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD, 1786 &rack_large_ackcmp, 1787 "Number of TCP connections with large mbuf's for compressed acks"); 1788 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1789 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1790 SYSCTL_CHILDREN(rack_counters), 1791 OID_AUTO, "persist_sends", CTLFLAG_RD, 1792 &rack_persists_sends, 1793 "Number of times we sent a persist probe"); 1794 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1795 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1796 SYSCTL_CHILDREN(rack_counters), 1797 OID_AUTO, "persist_acks", CTLFLAG_RD, 1798 &rack_persists_acks, 1799 "Number of times a persist probe was acked"); 1800 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1801 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1802 SYSCTL_CHILDREN(rack_counters), 1803 OID_AUTO, "persist_loss", CTLFLAG_RD, 1804 &rack_persists_loss, 1805 "Number of times we detected a lost persist probe (no ack)"); 1806 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1807 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1808 SYSCTL_CHILDREN(rack_counters), 1809 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1810 &rack_persists_lost_ends, 1811 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1812 rack_small_ackcmp = counter_u64_alloc(M_WAITOK); 1813 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1814 SYSCTL_CHILDREN(rack_counters), 1815 OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD, 1816 &rack_small_ackcmp, 1817 "Number of TCP connections with small mbuf's for compressed acks"); 1818 #ifdef INVARIANTS 1819 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1820 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1821 SYSCTL_CHILDREN(rack_counters), 1822 OID_AUTO, "map_adjust_req", 
	    CTLFLAG_RD,
	    &rack_adjust_map_bw,
	    "Number of times we hit the case where the sb went up and down on a sendmap entry");
#endif
	rack_multi_single_eq = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD,
	    &rack_multi_single_eq,
	    "Total number of ACKs represented by compressed acks");
	rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "cmp_ack_not", CTLFLAG_RD,
	    &rack_proc_non_comp_ack,
	    "Number of non-compressed acks that we processed");

	rack_sack_proc_all = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_long", CTLFLAG_RD,
	    &rack_sack_proc_all,
	    "Total times we had to walk whole list for sack processing");
	rack_sack_proc_restart = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_restart", CTLFLAG_RD,
	    &rack_sack_proc_restart,
	    "Total times we had to walk whole list due to a restart");
	rack_sack_proc_short = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "sack_short", CTLFLAG_RD,
	    &rack_sack_proc_short,
	    "Total times we took shortcut for sack processing");
	rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "tlp_calc_entered", CTLFLAG_RD,
	    &rack_enter_tlp_calc,
	    "Total times we called calc-tlp");
	rack_used_tlpmethod = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "hit_tlp_method", CTLFLAG_RD,
	    &rack_used_tlpmethod,
	    "Total number of times we hit TLP method 1");
	rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "hit_tlp_method2", CTLFLAG_RD,
	    &rack_used_tlpmethod2,
	    "Total number of times we hit TLP method 2");
	rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "skipacked", CTLFLAG_RD,
	    &rack_sack_skipped_acked,
	    "Total number of times we skipped previously sacked");
	rack_sack_splits = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "ofsplit", CTLFLAG_RD,
	    &rack_sack_splits,
	    "Total number of times we did the old-fashioned tree split");
	rack_progress_drops = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "prog_drops", CTLFLAG_RD,
	    &rack_progress_drops,
	    "Total number of progress drops");
	rack_input_idle_reduces = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD,
	    &rack_input_idle_reduces,
	    "Total number of idle reductions on input");
	rack_collapsed_win = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_counters),
	    OID_AUTO, "collapsed_win", CTLFLAG_RD,
	    &rack_collapsed_win,
	    "Total number of
collapsed windows"); 1906 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK); 1907 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1908 SYSCTL_CHILDREN(rack_counters), 1909 OID_AUTO, "tlp_nada", CTLFLAG_RD, 1910 &rack_tlp_does_nada, 1911 "Total number of nada tlp calls"); 1912 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1913 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1914 SYSCTL_CHILDREN(rack_counters), 1915 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1916 &rack_try_scwnd, 1917 "Total number of scwnd attempts"); 1918 1919 rack_per_timer_hole = counter_u64_alloc(M_WAITOK); 1920 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1921 SYSCTL_CHILDREN(rack_counters), 1922 OID_AUTO, "timer_hole", CTLFLAG_RD, 1923 &rack_per_timer_hole, 1924 "Total persists start in timer hole"); 1925 1926 rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK); 1927 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1928 SYSCTL_CHILDREN(rack_counters), 1929 OID_AUTO, "sndptr_wrong", CTLFLAG_RD, 1930 &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorrect"); 1931 rack_sbsndptr_right = counter_u64_alloc(M_WAITOK); 1932 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1933 SYSCTL_CHILDREN(rack_counters), 1934 OID_AUTO, "sndptr_right", CTLFLAG_RD, 1935 &rack_sbsndptr_right, "Total number of times the saved sbsndptr was correct"); 1936 1937 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1938 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1939 OID_AUTO, "outsize", CTLFLAG_RD, 1940 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1941 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1942 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1943 OID_AUTO, "opts", CTLFLAG_RD, 1944 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1945 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1946 SYSCTL_CHILDREN(rack_sysctl_root), 1947 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1948 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1949 } 1950 1951 static __inline int 1952 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1953 { 1954 if (SEQ_GEQ(b->r_start, a->r_start) && 1955 SEQ_LT(b->r_start, a->r_end)) { 1956 /* 1957 * The entry b is within the 1958 * block a. i.e.: 1959 * a -- |-------------| 1960 * b -- |----| 1961 * <or> 1962 * b -- |------| 1963 * <or> 1964 * b -- |-----------| 1965 */ 1966 return (0); 1967 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1968 /* 1969 * b falls as either the next 1970 * sequence block after a so a 1971 * is said to be smaller than b. 1972 * i.e: 1973 * a -- |------| 1974 * b -- |--------| 1975 * or 1976 * b -- |-----| 1977 */ 1978 return (1); 1979 } 1980 /* 1981 * Whats left is where a is 1982 * larger than b. i.e: 1983 * a -- |-------| 1984 * b -- |---| 1985 * or even possibly 1986 * b -- |--------------| 1987 */ 1988 return (-1); 1989 } 1990 1991 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1992 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1993 1994 static uint32_t 1995 rc_init_window(struct tcp_rack *rack) 1996 { 1997 uint32_t win; 1998 1999 if (rack->rc_init_win == 0) { 2000 /* 2001 * Nothing set by the user, use the system stack 2002 * default. 
2003 */ 2004 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 2005 } 2006 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 2007 return (win); 2008 } 2009 2010 static uint64_t 2011 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 2012 { 2013 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 2014 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 2015 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2016 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 2017 else 2018 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 2019 } 2020 2021 static uint64_t 2022 rack_get_bw(struct tcp_rack *rack) 2023 { 2024 if (rack->use_fixed_rate) { 2025 /* Return the fixed pacing rate */ 2026 return (rack_get_fixed_pacing_bw(rack)); 2027 } 2028 if (rack->r_ctl.gp_bw == 0) { 2029 /* 2030 * We have yet no b/w measurement, 2031 * if we have a user set initial bw 2032 * return it. If we don't have that and 2033 * we have an srtt, use the tcp IW (10) to 2034 * calculate a fictional b/w over the SRTT 2035 * which is more or less a guess. Note 2036 * we don't use our IW from rack on purpose 2037 * so if we have like IW=30, we are not 2038 * calculating a "huge" b/w. 2039 */ 2040 uint64_t bw, srtt; 2041 if (rack->r_ctl.init_rate) 2042 return (rack->r_ctl.init_rate); 2043 2044 /* Has the user set a max peak rate? */ 2045 #ifdef NETFLIX_PEAKRATE 2046 if (rack->rc_tp->t_maxpeakrate) 2047 return (rack->rc_tp->t_maxpeakrate); 2048 #endif 2049 /* Ok lets come up with the IW guess, if we have a srtt */ 2050 if (rack->rc_tp->t_srtt == 0) { 2051 /* 2052 * Go with old pacing method 2053 * i.e. burst mitigation only. 2054 */ 2055 return (0); 2056 } 2057 /* Ok lets get the initial TCP win (not racks) */ 2058 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2059 srtt = (uint64_t)rack->rc_tp->t_srtt; 2060 bw *= (uint64_t)USECS_IN_SECOND; 2061 bw /= srtt; 2062 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2063 bw = rack->r_ctl.bw_rate_cap; 2064 return (bw); 2065 } else { 2066 uint64_t bw; 2067 2068 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2069 /* Averaging is done, we can return the value */ 2070 bw = rack->r_ctl.gp_bw; 2071 } else { 2072 /* Still doing initial average must calculate */ 2073 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 2074 } 2075 #ifdef NETFLIX_PEAKRATE 2076 if ((rack->rc_tp->t_maxpeakrate) && 2077 (bw > rack->rc_tp->t_maxpeakrate)) { 2078 /* The user has set a peak rate to pace at 2079 * don't allow us to pace faster than that. 
2080 */ 2081 return (rack->rc_tp->t_maxpeakrate); 2082 } 2083 #endif 2084 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2085 bw = rack->r_ctl.bw_rate_cap; 2086 return (bw); 2087 } 2088 } 2089 2090 static uint16_t 2091 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2092 { 2093 if (rack->use_fixed_rate) { 2094 return (100); 2095 } else if (rack->in_probe_rtt && (rsm == NULL)) 2096 return (rack->r_ctl.rack_per_of_gp_probertt); 2097 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2098 rack->r_ctl.rack_per_of_gp_rec)) { 2099 if (rsm) { 2100 /* a retransmission always use the recovery rate */ 2101 return (rack->r_ctl.rack_per_of_gp_rec); 2102 } else if (rack->rack_rec_nonrxt_use_cr) { 2103 /* Directed to use the configured rate */ 2104 goto configured_rate; 2105 } else if (rack->rack_no_prr && 2106 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2107 /* No PRR, lets just use the b/w estimate only */ 2108 return (100); 2109 } else { 2110 /* 2111 * Here we may have a non-retransmit but we 2112 * have no overrides, so just use the recovery 2113 * rate (prr is in effect). 2114 */ 2115 return (rack->r_ctl.rack_per_of_gp_rec); 2116 } 2117 } 2118 configured_rate: 2119 /* For the configured rate we look at our cwnd vs the ssthresh */ 2120 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2121 return (rack->r_ctl.rack_per_of_gp_ss); 2122 else 2123 return (rack->r_ctl.rack_per_of_gp_ca); 2124 } 2125 2126 static void 2127 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2128 { 2129 /* 2130 * Types of logs (mod value) 2131 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2132 * 2 = a dsack round begins, persist is reset to 16. 2133 * 3 = a dsack round ends 2134 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2135 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2136 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
2137 */ 2138 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2139 union tcp_log_stackspecific log; 2140 struct timeval tv; 2141 2142 memset(&log, 0, sizeof(log)); 2143 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2144 log.u_bbr.flex1 <<= 1; 2145 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2146 log.u_bbr.flex1 <<= 1; 2147 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2148 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2149 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2150 log.u_bbr.flex4 = flex4; 2151 log.u_bbr.flex5 = flex5; 2152 log.u_bbr.flex6 = flex6; 2153 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2154 log.u_bbr.flex8 = mod; 2155 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2156 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2157 &rack->rc_inp->inp_socket->so_rcv, 2158 &rack->rc_inp->inp_socket->so_snd, 2159 RACK_DSACK_HANDLING, 0, 2160 0, &log, false, &tv); 2161 } 2162 } 2163 2164 static void 2165 rack_log_hdwr_pacing(struct tcp_rack *rack, 2166 uint64_t rate, uint64_t hw_rate, int line, 2167 int error, uint16_t mod) 2168 { 2169 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2170 union tcp_log_stackspecific log; 2171 struct timeval tv; 2172 const struct ifnet *ifp; 2173 2174 memset(&log, 0, sizeof(log)); 2175 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2176 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2177 if (rack->r_ctl.crte) { 2178 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2179 } else if (rack->rc_inp->inp_route.ro_nh && 2180 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2181 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2182 } else 2183 ifp = NULL; 2184 if (ifp) { 2185 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2186 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2187 } 2188 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2189 log.u_bbr.bw_inuse = rate; 2190 log.u_bbr.flex5 = line; 2191 log.u_bbr.flex6 = error; 2192 log.u_bbr.flex7 = mod; 2193 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2194 log.u_bbr.flex8 = rack->use_fixed_rate; 2195 log.u_bbr.flex8 <<= 1; 2196 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2197 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2198 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2199 if (rack->r_ctl.crte) 2200 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2201 else 2202 log.u_bbr.cur_del_rate = 0; 2203 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2204 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2205 &rack->rc_inp->inp_socket->so_rcv, 2206 &rack->rc_inp->inp_socket->so_snd, 2207 BBR_LOG_HDWR_PACE, 0, 2208 0, &log, false, &tv); 2209 } 2210 } 2211 2212 static uint64_t 2213 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2214 { 2215 /* 2216 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
2217 */ 2218 uint64_t bw_est, high_rate; 2219 uint64_t gain; 2220 2221 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2222 bw_est = bw * gain; 2223 bw_est /= (uint64_t)100; 2224 /* Never fall below the minimum (def 64kbps) */ 2225 if (bw_est < RACK_MIN_BW) 2226 bw_est = RACK_MIN_BW; 2227 if (rack->r_rack_hw_rate_caps) { 2228 /* Rate caps are in place */ 2229 if (rack->r_ctl.crte != NULL) { 2230 /* We have a hdwr rate already */ 2231 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2232 if (bw_est >= high_rate) { 2233 /* We are capping bw at the highest rate table entry */ 2234 rack_log_hdwr_pacing(rack, 2235 bw_est, high_rate, __LINE__, 2236 0, 3); 2237 bw_est = high_rate; 2238 if (capped) 2239 *capped = 1; 2240 } 2241 } else if ((rack->rack_hdrw_pacing == 0) && 2242 (rack->rack_hdw_pace_ena) && 2243 (rack->rack_attempt_hdwr_pace == 0) && 2244 (rack->rc_inp->inp_route.ro_nh != NULL) && 2245 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2246 /* 2247 * Special case, we have not yet attempted hardware 2248 * pacing, and yet we may, when we do, find out if we are 2249 * above the highest rate. We need to know the maxbw for the interface 2250 * in question (if it supports ratelimiting). We get back 2251 * a 0, if the interface is not found in the RL lists. 2252 */ 2253 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2254 if (high_rate) { 2255 /* Yep, we have a rate is it above this rate? */ 2256 if (bw_est > high_rate) { 2257 bw_est = high_rate; 2258 if (capped) 2259 *capped = 1; 2260 } 2261 } 2262 } 2263 } 2264 return (bw_est); 2265 } 2266 2267 static void 2268 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2269 { 2270 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2271 union tcp_log_stackspecific log; 2272 struct timeval tv; 2273 2274 if ((mod != 1) && (rack_verbose_logging == 0)) { 2275 /* 2276 * We get 3 values currently for mod 2277 * 1 - We are retransmitting and this tells the reason. 2278 * 2 - We are clearing a dup-ack count. 2279 * 3 - We are incrementing a dup-ack count. 2280 * 2281 * The clear/increment are only logged 2282 * if you have BBverbose on. 
2283 */ 2284 return; 2285 } 2286 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2287 log.u_bbr.flex1 = tsused; 2288 log.u_bbr.flex2 = thresh; 2289 log.u_bbr.flex3 = rsm->r_flags; 2290 log.u_bbr.flex4 = rsm->r_dupack; 2291 log.u_bbr.flex5 = rsm->r_start; 2292 log.u_bbr.flex6 = rsm->r_end; 2293 log.u_bbr.flex8 = mod; 2294 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2295 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2296 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2297 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2298 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2299 log.u_bbr.pacing_gain = rack->r_must_retran; 2300 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2301 &rack->rc_inp->inp_socket->so_rcv, 2302 &rack->rc_inp->inp_socket->so_snd, 2303 BBR_LOG_SETTINGS_CHG, 0, 2304 0, &log, false, &tv); 2305 } 2306 } 2307 2308 static void 2309 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2310 { 2311 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2312 union tcp_log_stackspecific log; 2313 struct timeval tv; 2314 2315 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2316 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2317 log.u_bbr.flex2 = to; 2318 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2319 log.u_bbr.flex4 = slot; 2320 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2321 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2322 log.u_bbr.flex7 = rack->rc_in_persist; 2323 log.u_bbr.flex8 = which; 2324 if (rack->rack_no_prr) 2325 log.u_bbr.pkts_out = 0; 2326 else 2327 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2328 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2329 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2330 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2331 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2332 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2333 log.u_bbr.pacing_gain = rack->r_must_retran; 2334 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2335 log.u_bbr.lost = rack_rto_min; 2336 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2337 &rack->rc_inp->inp_socket->so_rcv, 2338 &rack->rc_inp->inp_socket->so_snd, 2339 BBR_LOG_TIMERSTAR, 0, 2340 0, &log, false, &tv); 2341 } 2342 } 2343 2344 static void 2345 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2346 { 2347 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2348 union tcp_log_stackspecific log; 2349 struct timeval tv; 2350 2351 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2352 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2353 log.u_bbr.flex8 = to_num; 2354 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2355 log.u_bbr.flex2 = rack->rc_rack_rtt; 2356 if (rsm == NULL) 2357 log.u_bbr.flex3 = 0; 2358 else 2359 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2360 if (rack->rack_no_prr) 2361 log.u_bbr.flex5 = 0; 2362 else 2363 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2364 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2365 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2366 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2367 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2368 log.u_bbr.pacing_gain = rack->r_must_retran; 2369 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2370 &rack->rc_inp->inp_socket->so_rcv, 2371 &rack->rc_inp->inp_socket->so_snd, 2372 BBR_LOG_RTO, 0, 2373 0, &log, false, &tv); 2374 } 2375 } 2376 2377 static void 2378 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2379 struct rack_sendmap *prev, 2380 struct rack_sendmap *rsm, 2381 struct rack_sendmap *next, 2382 int flag, uint32_t 
th_ack, int line) 2383 { 2384 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2385 union tcp_log_stackspecific log; 2386 struct timeval tv; 2387 2388 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2389 log.u_bbr.flex8 = flag; 2390 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2391 log.u_bbr.cur_del_rate = (uint64_t)prev; 2392 log.u_bbr.delRate = (uint64_t)rsm; 2393 log.u_bbr.rttProp = (uint64_t)next; 2394 log.u_bbr.flex7 = 0; 2395 if (prev) { 2396 log.u_bbr.flex1 = prev->r_start; 2397 log.u_bbr.flex2 = prev->r_end; 2398 log.u_bbr.flex7 |= 0x4; 2399 } 2400 if (rsm) { 2401 log.u_bbr.flex3 = rsm->r_start; 2402 log.u_bbr.flex4 = rsm->r_end; 2403 log.u_bbr.flex7 |= 0x2; 2404 } 2405 if (next) { 2406 log.u_bbr.flex5 = next->r_start; 2407 log.u_bbr.flex6 = next->r_end; 2408 log.u_bbr.flex7 |= 0x1; 2409 } 2410 log.u_bbr.applimited = line; 2411 log.u_bbr.pkts_out = th_ack; 2412 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2413 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2414 if (rack->rack_no_prr) 2415 log.u_bbr.lost = 0; 2416 else 2417 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2418 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2419 &rack->rc_inp->inp_socket->so_rcv, 2420 &rack->rc_inp->inp_socket->so_snd, 2421 TCP_LOG_MAPCHG, 0, 2422 0, &log, false, &tv); 2423 } 2424 } 2425 2426 static void 2427 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2428 struct rack_sendmap *rsm, int conf) 2429 { 2430 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2431 union tcp_log_stackspecific log; 2432 struct timeval tv; 2433 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2434 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2435 log.u_bbr.flex1 = t; 2436 log.u_bbr.flex2 = len; 2437 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2438 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2439 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2440 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2441 log.u_bbr.flex7 = conf; 2442 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2443 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2444 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2445 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2446 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2447 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2448 if (rsm) { 2449 log.u_bbr.pkt_epoch = rsm->r_start; 2450 log.u_bbr.lost = rsm->r_end; 2451 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2452 /* We loose any upper of the 24 bits */ 2453 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2454 } else { 2455 /* Its a SYN */ 2456 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2457 log.u_bbr.lost = 0; 2458 log.u_bbr.cwnd_gain = 0; 2459 log.u_bbr.pacing_gain = 0; 2460 } 2461 /* Write out general bits of interest rrs here */ 2462 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2463 log.u_bbr.use_lt_bw <<= 1; 2464 log.u_bbr.use_lt_bw |= rack->forced_ack; 2465 log.u_bbr.use_lt_bw <<= 1; 2466 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2467 log.u_bbr.use_lt_bw <<= 1; 2468 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2469 log.u_bbr.use_lt_bw <<= 1; 2470 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2471 log.u_bbr.use_lt_bw <<= 1; 2472 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2473 log.u_bbr.use_lt_bw <<= 1; 2474 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2475 log.u_bbr.use_lt_bw <<= 1; 2476 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2477 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2478 log.u_bbr.epoch = 
rack->r_ctl.rc_time_probertt_starts; 2479 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2480 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2481 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2482 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2483 log.u_bbr.bw_inuse <<= 32; 2484 if (rsm) 2485 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2486 TCP_LOG_EVENTP(tp, NULL, 2487 &rack->rc_inp->inp_socket->so_rcv, 2488 &rack->rc_inp->inp_socket->so_snd, 2489 BBR_LOG_BBRRTT, 0, 2490 0, &log, false, &tv); 2491 2492 2493 } 2494 } 2495 2496 static void 2497 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2498 { 2499 /* 2500 * Log the rtt sample we are 2501 * applying to the srtt algorithm in 2502 * useconds. 2503 */ 2504 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2505 union tcp_log_stackspecific log; 2506 struct timeval tv; 2507 2508 /* Convert our ms to a microsecond */ 2509 memset(&log, 0, sizeof(log)); 2510 log.u_bbr.flex1 = rtt; 2511 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2512 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2513 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2514 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2515 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2516 log.u_bbr.flex7 = 1; 2517 log.u_bbr.flex8 = rack->sack_attack_disable; 2518 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2519 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2520 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2521 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2522 log.u_bbr.pacing_gain = rack->r_must_retran; 2523 /* 2524 * We capture in delRate the upper 32 bits as 2525 * the confidence level we had declared, and the 2526 * lower 32 bits as the actual RTT using the arrival 2527 * timestamp. 
2528 */ 2529 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2530 log.u_bbr.delRate <<= 32; 2531 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2532 /* Lets capture all the things that make up t_rtxcur */ 2533 log.u_bbr.applimited = rack_rto_min; 2534 log.u_bbr.epoch = rack_rto_max; 2535 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2536 log.u_bbr.lost = rack_rto_min; 2537 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2538 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2539 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2540 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2541 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2542 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2543 &rack->rc_inp->inp_socket->so_rcv, 2544 &rack->rc_inp->inp_socket->so_snd, 2545 TCP_LOG_RTT, 0, 2546 0, &log, false, &tv); 2547 } 2548 } 2549 2550 static void 2551 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2552 { 2553 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2554 union tcp_log_stackspecific log; 2555 struct timeval tv; 2556 2557 /* Convert our ms to a microsecond */ 2558 memset(&log, 0, sizeof(log)); 2559 log.u_bbr.flex1 = rtt; 2560 log.u_bbr.flex2 = send_time; 2561 log.u_bbr.flex3 = ack_time; 2562 log.u_bbr.flex4 = where; 2563 log.u_bbr.flex7 = 2; 2564 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2565 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2566 &rack->rc_inp->inp_socket->so_rcv, 2567 &rack->rc_inp->inp_socket->so_snd, 2568 TCP_LOG_RTT, 0, 2569 0, &log, false, &tv); 2570 } 2571 } 2572 2573 2574 2575 static inline void 2576 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2577 { 2578 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2579 union tcp_log_stackspecific log; 2580 struct timeval tv; 2581 2582 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2583 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2584 log.u_bbr.flex1 = line; 2585 log.u_bbr.flex2 = tick; 2586 log.u_bbr.flex3 = tp->t_maxunacktime; 2587 log.u_bbr.flex4 = tp->t_acktime; 2588 log.u_bbr.flex8 = event; 2589 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2590 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2591 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2592 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2593 log.u_bbr.pacing_gain = rack->r_must_retran; 2594 TCP_LOG_EVENTP(tp, NULL, 2595 &rack->rc_inp->inp_socket->so_rcv, 2596 &rack->rc_inp->inp_socket->so_snd, 2597 BBR_LOG_PROGRESS, 0, 2598 0, &log, false, &tv); 2599 } 2600 } 2601 2602 static void 2603 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2604 { 2605 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2606 union tcp_log_stackspecific log; 2607 2608 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2609 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2610 log.u_bbr.flex1 = slot; 2611 if (rack->rack_no_prr) 2612 log.u_bbr.flex2 = 0; 2613 else 2614 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2615 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2616 log.u_bbr.flex8 = rack->rc_in_persist; 2617 log.u_bbr.timeStamp = cts; 2618 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2619 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2620 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2621 log.u_bbr.pacing_gain = rack->r_must_retran; 2622 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2623 &rack->rc_inp->inp_socket->so_rcv, 
2624 &rack->rc_inp->inp_socket->so_snd, 2625 BBR_LOG_BBRSND, 0, 2626 0, &log, false, tv); 2627 } 2628 } 2629 2630 static void 2631 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2632 { 2633 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2634 union tcp_log_stackspecific log; 2635 struct timeval tv; 2636 2637 memset(&log, 0, sizeof(log)); 2638 log.u_bbr.flex1 = did_out; 2639 log.u_bbr.flex2 = nxt_pkt; 2640 log.u_bbr.flex3 = way_out; 2641 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2642 if (rack->rack_no_prr) 2643 log.u_bbr.flex5 = 0; 2644 else 2645 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2646 log.u_bbr.flex6 = nsegs; 2647 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2648 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2649 log.u_bbr.flex7 <<= 1; 2650 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2651 log.u_bbr.flex7 <<= 1; 2652 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2653 log.u_bbr.flex8 = rack->rc_in_persist; 2654 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2655 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2656 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2657 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2658 log.u_bbr.use_lt_bw <<= 1; 2659 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2660 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2661 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2662 log.u_bbr.pacing_gain = rack->r_must_retran; 2663 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2664 &rack->rc_inp->inp_socket->so_rcv, 2665 &rack->rc_inp->inp_socket->so_snd, 2666 BBR_LOG_DOSEG_DONE, 0, 2667 0, &log, false, &tv); 2668 } 2669 } 2670 2671 static void 2672 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2673 { 2674 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2675 union tcp_log_stackspecific log; 2676 struct timeval tv; 2677 2678 memset(&log, 0, sizeof(log)); 2679 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2680 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2681 log.u_bbr.flex4 = arg1; 2682 log.u_bbr.flex5 = arg2; 2683 log.u_bbr.flex6 = arg3; 2684 log.u_bbr.flex8 = frm; 2685 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2686 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2687 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2688 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2689 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2690 log.u_bbr.pacing_gain = rack->r_must_retran; 2691 TCP_LOG_EVENTP(tp, NULL, 2692 &tp->t_inpcb->inp_socket->so_rcv, 2693 &tp->t_inpcb->inp_socket->so_snd, 2694 TCP_HDWR_PACE_SIZE, 0, 2695 0, &log, false, &tv); 2696 } 2697 } 2698 2699 static void 2700 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2701 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2702 { 2703 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2704 union tcp_log_stackspecific log; 2705 struct timeval tv; 2706 2707 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2708 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2709 log.u_bbr.flex1 = slot; 2710 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2711 log.u_bbr.flex4 = reason; 2712 if (rack->rack_no_prr) 2713 log.u_bbr.flex5 = 0; 2714 else 2715 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2716 log.u_bbr.flex7 = hpts_calling; 2717 log.u_bbr.flex8 = rack->rc_in_persist; 2718 log.u_bbr.lt_epoch = cwnd_to_use; 2719 
log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2720 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2721 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2722 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2723 log.u_bbr.pacing_gain = rack->r_must_retran; 2724 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2725 &rack->rc_inp->inp_socket->so_rcv, 2726 &rack->rc_inp->inp_socket->so_snd, 2727 BBR_LOG_JUSTRET, 0, 2728 tlen, &log, false, &tv); 2729 } 2730 } 2731 2732 static void 2733 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2734 struct timeval *tv, uint32_t flags_on_entry) 2735 { 2736 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2737 union tcp_log_stackspecific log; 2738 2739 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2740 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2741 log.u_bbr.flex1 = line; 2742 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2743 log.u_bbr.flex3 = flags_on_entry; 2744 log.u_bbr.flex4 = us_cts; 2745 if (rack->rack_no_prr) 2746 log.u_bbr.flex5 = 0; 2747 else 2748 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2749 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2750 log.u_bbr.flex7 = hpts_removed; 2751 log.u_bbr.flex8 = 1; 2752 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2753 log.u_bbr.timeStamp = us_cts; 2754 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2755 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2756 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2757 log.u_bbr.pacing_gain = rack->r_must_retran; 2758 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2759 &rack->rc_inp->inp_socket->so_rcv, 2760 &rack->rc_inp->inp_socket->so_snd, 2761 BBR_LOG_TIMERCANC, 0, 2762 0, &log, false, tv); 2763 } 2764 } 2765 2766 static void 2767 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2768 uint32_t flex1, uint32_t flex2, 2769 uint32_t flex3, uint32_t flex4, 2770 uint32_t flex5, uint32_t flex6, 2771 uint16_t flex7, uint8_t mod) 2772 { 2773 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2774 union tcp_log_stackspecific log; 2775 struct timeval tv; 2776 2777 if (mod == 1) { 2778 /* No you can't use 1, its for the real to cancel */ 2779 return; 2780 } 2781 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2782 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2783 log.u_bbr.flex1 = flex1; 2784 log.u_bbr.flex2 = flex2; 2785 log.u_bbr.flex3 = flex3; 2786 log.u_bbr.flex4 = flex4; 2787 log.u_bbr.flex5 = flex5; 2788 log.u_bbr.flex6 = flex6; 2789 log.u_bbr.flex7 = flex7; 2790 log.u_bbr.flex8 = mod; 2791 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2792 &rack->rc_inp->inp_socket->so_rcv, 2793 &rack->rc_inp->inp_socket->so_snd, 2794 BBR_LOG_TIMERCANC, 0, 2795 0, &log, false, &tv); 2796 } 2797 } 2798 2799 static void 2800 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2801 { 2802 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2803 union tcp_log_stackspecific log; 2804 struct timeval tv; 2805 2806 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2807 log.u_bbr.flex1 = timers; 2808 log.u_bbr.flex2 = ret; 2809 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2810 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2811 log.u_bbr.flex5 = cts; 2812 if (rack->rack_no_prr) 2813 log.u_bbr.flex6 = 0; 2814 else 2815 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2816 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2817 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2818 log.u_bbr.pacing_gain = rack->r_must_retran; 2819 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2820 log.u_bbr.inflight = 
ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2821 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2822 &rack->rc_inp->inp_socket->so_rcv, 2823 &rack->rc_inp->inp_socket->so_snd, 2824 BBR_LOG_TO_PROCESS, 0, 2825 0, &log, false, &tv); 2826 } 2827 } 2828 2829 static void 2830 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd) 2831 { 2832 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2833 union tcp_log_stackspecific log; 2834 struct timeval tv; 2835 2836 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2837 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2838 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2839 if (rack->rack_no_prr) 2840 log.u_bbr.flex3 = 0; 2841 else 2842 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2843 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2844 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2845 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2846 log.u_bbr.flex8 = frm; 2847 log.u_bbr.pkts_out = orig_cwnd; 2848 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2849 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2850 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2851 log.u_bbr.use_lt_bw <<= 1; 2852 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2853 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2854 &rack->rc_inp->inp_socket->so_rcv, 2855 &rack->rc_inp->inp_socket->so_snd, 2856 BBR_LOG_BBRUPD, 0, 2857 0, &log, false, &tv); 2858 } 2859 } 2860 2861 #ifdef NETFLIX_EXP_DETECTION 2862 static void 2863 rack_log_sad(struct tcp_rack *rack, int event) 2864 { 2865 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2866 union tcp_log_stackspecific log; 2867 struct timeval tv; 2868 2869 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2870 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2871 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2872 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2873 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2874 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2875 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2876 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2877 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2878 log.u_bbr.lt_epoch |= rack->do_detection; 2879 log.u_bbr.applimited = tcp_map_minimum; 2880 log.u_bbr.flex7 = rack->sack_attack_disable; 2881 log.u_bbr.flex8 = event; 2882 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2883 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2884 log.u_bbr.delivered = tcp_sad_decay_val; 2885 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2886 &rack->rc_inp->inp_socket->so_rcv, 2887 &rack->rc_inp->inp_socket->so_snd, 2888 TCP_SAD_DETECTION, 0, 2889 0, &log, false, &tv); 2890 } 2891 } 2892 #endif 2893 2894 static void 2895 rack_counter_destroy(void) 2896 { 2897 int i; 2898 2899 counter_u64_free(rack_fto_send); 2900 counter_u64_free(rack_fto_rsm_send); 2901 counter_u64_free(rack_nfto_resend); 2902 counter_u64_free(rack_hw_pace_init_fail); 2903 counter_u64_free(rack_hw_pace_lost); 2904 counter_u64_free(rack_non_fto_send); 2905 counter_u64_free(rack_extended_rfo); 2906 counter_u64_free(rack_ack_total); 2907 counter_u64_free(rack_express_sack); 2908 counter_u64_free(rack_sack_total); 2909 counter_u64_free(rack_move_none); 2910 counter_u64_free(rack_move_some); 2911 counter_u64_free(rack_sack_attacks_detected); 2912 counter_u64_free(rack_sack_attacks_reversed); 2913 counter_u64_free(rack_sack_used_next_merge); 2914 counter_u64_free(rack_sack_used_prev_merge); 2915 counter_u64_free(rack_badfr); 2916 counter_u64_free(rack_badfr_bytes); 2917 counter_u64_free(rack_rtm_prr_retran); 2918 counter_u64_free(rack_rtm_prr_newdata); 
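	/*
	 * Every counter allocated with counter_u64_alloc() during sysctl
	 * setup must be released here as well, otherwise the counters are
	 * leaked when the stack is torn down.
	 */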
2919 counter_u64_free(rack_timestamp_mismatch); 2920 counter_u64_free(rack_find_high); 2921 counter_u64_free(rack_reorder_seen); 2922 counter_u64_free(rack_tlp_tot); 2923 counter_u64_free(rack_tlp_newdata); 2924 counter_u64_free(rack_tlp_retran); 2925 counter_u64_free(rack_tlp_retran_bytes); 2926 counter_u64_free(rack_tlp_retran_fail); 2927 counter_u64_free(rack_to_tot); 2928 counter_u64_free(rack_to_arm_rack); 2929 counter_u64_free(rack_to_arm_tlp); 2930 counter_u64_free(rack_calc_zero); 2931 counter_u64_free(rack_calc_nonzero); 2932 counter_u64_free(rack_paced_segments); 2933 counter_u64_free(rack_unpaced_segments); 2934 counter_u64_free(rack_saw_enobuf); 2935 counter_u64_free(rack_saw_enobuf_hw); 2936 counter_u64_free(rack_saw_enetunreach); 2937 counter_u64_free(rack_hot_alloc); 2938 counter_u64_free(rack_to_alloc); 2939 counter_u64_free(rack_to_alloc_hard); 2940 counter_u64_free(rack_to_alloc_emerg); 2941 counter_u64_free(rack_to_alloc_limited); 2942 counter_u64_free(rack_alloc_limited_conns); 2943 counter_u64_free(rack_split_limited); 2944 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 2945 counter_u64_free(rack_proc_comp_ack[i]); 2946 } 2947 counter_u64_free(rack_multi_single_eq); 2948 counter_u64_free(rack_proc_non_comp_ack); 2949 counter_u64_free(rack_sack_proc_all); 2950 counter_u64_free(rack_sack_proc_restart); 2951 counter_u64_free(rack_sack_proc_short); 2952 counter_u64_free(rack_enter_tlp_calc); 2953 counter_u64_free(rack_used_tlpmethod); 2954 counter_u64_free(rack_used_tlpmethod2); 2955 counter_u64_free(rack_sack_skipped_acked); 2956 counter_u64_free(rack_sack_splits); 2957 counter_u64_free(rack_progress_drops); 2958 counter_u64_free(rack_input_idle_reduces); 2959 counter_u64_free(rack_collapsed_win); 2960 counter_u64_free(rack_tlp_does_nada); 2961 counter_u64_free(rack_try_scwnd); 2962 counter_u64_free(rack_per_timer_hole); 2963 counter_u64_free(rack_large_ackcmp); 2964 counter_u64_free(rack_small_ackcmp); 2965 counter_u64_free(rack_persists_sends); 2966 counter_u64_free(rack_persists_acks); 2967 counter_u64_free(rack_persists_loss); 2968 counter_u64_free(rack_persists_lost_ends); 2969 #ifdef INVARIANTS 2970 counter_u64_free(rack_adjust_map_bw); 2971 #endif 2972 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2973 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2974 } 2975 2976 static struct rack_sendmap * 2977 rack_alloc(struct tcp_rack *rack) 2978 { 2979 struct rack_sendmap *rsm; 2980 2981 /* 2982 * First get the top of the list it in 2983 * theory is the "hottest" rsm we have, 2984 * possibly just freed by ack processing. 2985 */ 2986 if (rack->rc_free_cnt > rack_free_cache) { 2987 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2988 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2989 counter_u64_add(rack_hot_alloc, 1); 2990 rack->rc_free_cnt--; 2991 return (rsm); 2992 } 2993 /* 2994 * Once we get under our free cache we probably 2995 * no longer have a "hot" one available. Lets 2996 * get one from UMA. 2997 */ 2998 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2999 if (rsm) { 3000 rack->r_ctl.rc_num_maps_alloced++; 3001 counter_u64_add(rack_to_alloc, 1); 3002 return (rsm); 3003 } 3004 /* 3005 * Dig in to our aux rsm's (the last two) since 3006 * UMA failed to get us one. 
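 * To summarize, the allocation preference is: (1) a hot entry off the
 * head of the free list while we hold more than rack_free_cache of
 * them, (2) a fresh entry from the UMA zone, and (3) one of these
 * reserved tail entries as a last resort before giving up.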
3007 */ 3008 if (rack->rc_free_cnt) { 3009 counter_u64_add(rack_to_alloc_emerg, 1); 3010 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 3011 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3012 rack->rc_free_cnt--; 3013 return (rsm); 3014 } 3015 return (NULL); 3016 } 3017 3018 static struct rack_sendmap * 3019 rack_alloc_full_limit(struct tcp_rack *rack) 3020 { 3021 if ((V_tcp_map_entries_limit > 0) && 3022 (rack->do_detection == 0) && 3023 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3024 counter_u64_add(rack_to_alloc_limited, 1); 3025 if (!rack->alloc_limit_reported) { 3026 rack->alloc_limit_reported = 1; 3027 counter_u64_add(rack_alloc_limited_conns, 1); 3028 } 3029 return (NULL); 3030 } 3031 return (rack_alloc(rack)); 3032 } 3033 3034 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3035 static struct rack_sendmap * 3036 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3037 { 3038 struct rack_sendmap *rsm; 3039 3040 if (limit_type) { 3041 /* currently there is only one limit type */ 3042 if (V_tcp_map_split_limit > 0 && 3043 (rack->do_detection == 0) && 3044 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 3045 counter_u64_add(rack_split_limited, 1); 3046 if (!rack->alloc_limit_reported) { 3047 rack->alloc_limit_reported = 1; 3048 counter_u64_add(rack_alloc_limited_conns, 1); 3049 } 3050 return (NULL); 3051 } 3052 } 3053 3054 /* allocate and mark in the limit type, if set */ 3055 rsm = rack_alloc(rack); 3056 if (rsm != NULL && limit_type) { 3057 rsm->r_limit_type = limit_type; 3058 rack->r_ctl.rc_num_split_allocs++; 3059 } 3060 return (rsm); 3061 } 3062 3063 static void 3064 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3065 { 3066 if (rsm->r_flags & RACK_APP_LIMITED) { 3067 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3068 rack->r_ctl.rc_app_limited_cnt--; 3069 } 3070 } 3071 if (rsm->r_limit_type) { 3072 /* currently there is only one limit type */ 3073 rack->r_ctl.rc_num_split_allocs--; 3074 } 3075 if (rsm == rack->r_ctl.rc_first_appl) { 3076 if (rack->r_ctl.rc_app_limited_cnt == 0) 3077 rack->r_ctl.rc_first_appl = NULL; 3078 else { 3079 /* Follow the next one out */ 3080 struct rack_sendmap fe; 3081 3082 fe.r_start = rsm->r_nseq_appl; 3083 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 3084 } 3085 } 3086 if (rsm == rack->r_ctl.rc_resend) 3087 rack->r_ctl.rc_resend = NULL; 3088 if (rsm == rack->r_ctl.rc_rsm_at_retran) 3089 rack->r_ctl.rc_rsm_at_retran = NULL; 3090 if (rsm == rack->r_ctl.rc_end_appl) 3091 rack->r_ctl.rc_end_appl = NULL; 3092 if (rack->r_ctl.rc_tlpsend == rsm) 3093 rack->r_ctl.rc_tlpsend = NULL; 3094 if (rack->r_ctl.rc_sacklast == rsm) 3095 rack->r_ctl.rc_sacklast = NULL; 3096 memset(rsm, 0, sizeof(struct rack_sendmap)); 3097 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3098 rack->rc_free_cnt++; 3099 } 3100 3101 static void 3102 rack_free_trim(struct tcp_rack *rack) 3103 { 3104 struct rack_sendmap *rsm; 3105 3106 /* 3107 * Free up all the tail entries until 3108 * we get our list down to the limit. 
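 * Only entries beyond rack_free_cache are handed back to UMA, and we
 * trim from the tail, so the head of the list keeps the most recently
 * freed (hottest) entries available for rack_alloc().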
3109 */ 3110 while (rack->rc_free_cnt > rack_free_cache) { 3111 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3112 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3113 rack->rc_free_cnt--; 3114 uma_zfree(rack_zone, rsm); 3115 } 3116 } 3117 3118 3119 static uint32_t 3120 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3121 { 3122 uint64_t srtt, bw, len, tim; 3123 uint32_t segsiz, def_len, minl; 3124 3125 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3126 def_len = rack_def_data_window * segsiz; 3127 if (rack->rc_gp_filled == 0) { 3128 /* 3129 * We have no measurement (IW is in flight?) so 3130 * we can only guess using our data_window sysctl 3131 * value (usually 20MSS). 3132 */ 3133 return (def_len); 3134 } 3135 /* 3136 * Now we have a number of factors to consider. 3137 * 3138 * 1) We have a desired BDP which is usually 3139 * at least 2. 3140 * 2) We have a minimum number of rtt's usually 1 SRTT 3141 * but we allow it too to be more. 3142 * 3) We want to make sure a measurement last N useconds (if 3143 * we have set rack_min_measure_usec. 3144 * 3145 * We handle the first concern here by trying to create a data 3146 * window of max(rack_def_data_window, DesiredBDP). The 3147 * second concern we handle in not letting the measurement 3148 * window end normally until at least the required SRTT's 3149 * have gone by which is done further below in 3150 * rack_enough_for_measurement(). Finally the third concern 3151 * we also handle here by calculating how long that time 3152 * would take at the current BW and then return the 3153 * max of our first calculation and that length. Note 3154 * that if rack_min_measure_usec is 0, we don't deal 3155 * with concern 3. Also for both Concern 1 and 3 an 3156 * application limited period could end the measurement 3157 * earlier. 3158 * 3159 * So lets calculate the BDP with the "known" b/w using 3160 * the SRTT has our rtt and then multiply it by the 3161 * goal. 3162 */ 3163 bw = rack_get_bw(rack); 3164 srtt = (uint64_t)tp->t_srtt; 3165 len = bw * srtt; 3166 len /= (uint64_t)HPTS_USEC_IN_SEC; 3167 len *= max(1, rack_goal_bdp); 3168 /* Now we need to round up to the nearest MSS */ 3169 len = roundup(len, segsiz); 3170 if (rack_min_measure_usec) { 3171 /* Now calculate our min length for this b/w */ 3172 tim = rack_min_measure_usec; 3173 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3174 if (minl == 0) 3175 minl = 1; 3176 minl = roundup(minl, segsiz); 3177 if (len < minl) 3178 len = minl; 3179 } 3180 /* 3181 * Now if we have a very small window we want 3182 * to attempt to get the window that is 3183 * as small as possible. This happens on 3184 * low b/w connections and we don't want to 3185 * span huge numbers of rtt's between measurements. 3186 * 3187 * We basically include 2 over our "MIN window" so 3188 * that the measurement can be shortened (possibly) by 3189 * an ack'ed packet. 3190 */ 3191 if (len < def_len) 3192 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3193 else 3194 return (max((uint32_t)len, def_len)); 3195 3196 } 3197 3198 static int 3199 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3200 { 3201 uint32_t tim, srtts, segsiz; 3202 3203 /* 3204 * Has enough time passed for the GP measurement to be valid? 
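 * Roughly: the answer is yes when everything outstanding has been acked,
 * when the ack reaches the app-limited point, or once at least
 * rack_min_srtts worth of GP srtt time has passed; otherwise, and
 * whenever too few bytes have been covered, we keep waiting.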
3205 */ 3206 if ((tp->snd_max == tp->snd_una) || 3207 (th_ack == tp->snd_max)){ 3208 /* All is acked */ 3209 *quality = RACK_QUALITY_ALLACKED; 3210 return (1); 3211 } 3212 if (SEQ_LT(th_ack, tp->gput_seq)) { 3213 /* Not enough bytes yet */ 3214 return (0); 3215 } 3216 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3217 if (SEQ_LT(th_ack, tp->gput_ack) && 3218 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3219 /* Not enough bytes yet */ 3220 return (0); 3221 } 3222 if (rack->r_ctl.rc_first_appl && 3223 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3224 /* 3225 * We are up to the app limited send point 3226 * we have to measure irrespective of the time.. 3227 */ 3228 *quality = RACK_QUALITY_APPLIMITED; 3229 return (1); 3230 } 3231 /* Now what about time? */ 3232 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3233 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3234 if (tim >= srtts) { 3235 *quality = RACK_QUALITY_HIGH; 3236 return (1); 3237 } 3238 /* Nope not even a full SRTT has passed */ 3239 return (0); 3240 } 3241 3242 static void 3243 rack_log_timely(struct tcp_rack *rack, 3244 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3245 uint64_t up_bnd, int line, uint8_t method) 3246 { 3247 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3248 union tcp_log_stackspecific log; 3249 struct timeval tv; 3250 3251 memset(&log, 0, sizeof(log)); 3252 log.u_bbr.flex1 = logged; 3253 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3254 log.u_bbr.flex2 <<= 4; 3255 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3256 log.u_bbr.flex2 <<= 4; 3257 log.u_bbr.flex2 |= rack->rc_gp_incr; 3258 log.u_bbr.flex2 <<= 4; 3259 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3260 log.u_bbr.flex3 = rack->rc_gp_incr; 3261 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3262 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3263 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3264 log.u_bbr.flex7 = rack->rc_gp_bwred; 3265 log.u_bbr.flex8 = method; 3266 log.u_bbr.cur_del_rate = cur_bw; 3267 log.u_bbr.delRate = low_bnd; 3268 log.u_bbr.bw_inuse = up_bnd; 3269 log.u_bbr.rttProp = rack_get_bw(rack); 3270 log.u_bbr.pkt_epoch = line; 3271 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3272 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3273 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3274 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3275 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3276 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3277 log.u_bbr.cwnd_gain <<= 1; 3278 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3279 log.u_bbr.cwnd_gain <<= 1; 3280 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3281 log.u_bbr.cwnd_gain <<= 1; 3282 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3283 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3284 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3285 &rack->rc_inp->inp_socket->so_rcv, 3286 &rack->rc_inp->inp_socket->so_snd, 3287 TCP_TIMELY_WORK, 0, 3288 0, &log, false, &tv); 3289 } 3290 } 3291 3292 static int 3293 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3294 { 3295 /* 3296 * Before we increase we need to know if 3297 * the estimate just made was less than 3298 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3299 * 3300 * If we already are pacing at a fast enough 3301 * rate to push us faster there is no sense of 3302 * increasing. 3303 * 3304 * We first caculate our actual pacing rate (ss or ca multipler 3305 * times our cur_bw). 
3306 * 3307 * Then we take the last measured rate and multipy by our 3308 * maximum pacing overage to give us a max allowable rate. 3309 * 3310 * If our act_rate is smaller than our max_allowable rate 3311 * then we should increase. Else we should hold steady. 3312 * 3313 */ 3314 uint64_t act_rate, max_allow_rate; 3315 3316 if (rack_timely_no_stopping) 3317 return (1); 3318 3319 if ((cur_bw == 0) || (last_bw_est == 0)) { 3320 /* 3321 * Initial startup case or 3322 * everything is acked case. 3323 */ 3324 rack_log_timely(rack, mult, cur_bw, 0, 0, 3325 __LINE__, 9); 3326 return (1); 3327 } 3328 if (mult <= 100) { 3329 /* 3330 * We can always pace at or slightly above our rate. 3331 */ 3332 rack_log_timely(rack, mult, cur_bw, 0, 0, 3333 __LINE__, 9); 3334 return (1); 3335 } 3336 act_rate = cur_bw * (uint64_t)mult; 3337 act_rate /= 100; 3338 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3339 max_allow_rate /= 100; 3340 if (act_rate < max_allow_rate) { 3341 /* 3342 * Here the rate we are actually pacing at 3343 * is smaller than 10% above our last measurement. 3344 * This means we are pacing below what we would 3345 * like to try to achieve (plus some wiggle room). 3346 */ 3347 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3348 __LINE__, 9); 3349 return (1); 3350 } else { 3351 /* 3352 * Here we are already pacing at least rack_max_per_above(10%) 3353 * what we are getting back. This indicates most likely 3354 * that we are being limited (cwnd/rwnd/app) and can't 3355 * get any more b/w. There is no sense of trying to 3356 * raise up the pacing rate its not speeding us up 3357 * and we already are pacing faster than we are getting. 3358 */ 3359 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3360 __LINE__, 8); 3361 return (0); 3362 } 3363 } 3364 3365 static void 3366 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3367 { 3368 /* 3369 * When we drag bottom, we want to assure 3370 * that no multiplier is below 1.0, if so 3371 * we want to restore it to at least that. 3372 */ 3373 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3374 /* This is unlikely we usually do not touch recovery */ 3375 rack->r_ctl.rack_per_of_gp_rec = 100; 3376 } 3377 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3378 rack->r_ctl.rack_per_of_gp_ca = 100; 3379 } 3380 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3381 rack->r_ctl.rack_per_of_gp_ss = 100; 3382 } 3383 } 3384 3385 static void 3386 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3387 { 3388 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3389 rack->r_ctl.rack_per_of_gp_ca = 100; 3390 } 3391 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3392 rack->r_ctl.rack_per_of_gp_ss = 100; 3393 } 3394 } 3395 3396 static void 3397 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3398 { 3399 int32_t calc, logged, plus; 3400 3401 logged = 0; 3402 3403 if (override) { 3404 /* 3405 * override is passed when we are 3406 * loosing b/w and making one last 3407 * gasp at trying to not loose out 3408 * to a new-reno flow. 3409 */ 3410 goto extra_boost; 3411 } 3412 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3413 if (rack->rc_gp_incr && 3414 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3415 /* 3416 * Reset and get 5 strokes more before the boost. Note 3417 * that the count is 0 based so we have to add one. 
3418 */ 3419 extra_boost: 3420 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3421 rack->rc_gp_timely_inc_cnt = 0; 3422 } else 3423 plus = (uint32_t)rack_gp_increase_per; 3424 /* Must be at least 1% increase for true timely increases */ 3425 if ((plus < 1) && 3426 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3427 plus = 1; 3428 if (rack->rc_gp_saw_rec && 3429 (rack->rc_gp_no_rec_chg == 0) && 3430 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3431 rack->r_ctl.rack_per_of_gp_rec)) { 3432 /* We have been in recovery ding it too */ 3433 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3434 if (calc > 0xffff) 3435 calc = 0xffff; 3436 logged |= 1; 3437 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3438 if (rack_per_upper_bound_ss && 3439 (rack->rc_dragged_bottom == 0) && 3440 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3441 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3442 } 3443 if (rack->rc_gp_saw_ca && 3444 (rack->rc_gp_saw_ss == 0) && 3445 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3446 rack->r_ctl.rack_per_of_gp_ca)) { 3447 /* In CA */ 3448 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3449 if (calc > 0xffff) 3450 calc = 0xffff; 3451 logged |= 2; 3452 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3453 if (rack_per_upper_bound_ca && 3454 (rack->rc_dragged_bottom == 0) && 3455 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3456 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3457 } 3458 if (rack->rc_gp_saw_ss && 3459 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3460 rack->r_ctl.rack_per_of_gp_ss)) { 3461 /* In SS */ 3462 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3463 if (calc > 0xffff) 3464 calc = 0xffff; 3465 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3466 if (rack_per_upper_bound_ss && 3467 (rack->rc_dragged_bottom == 0) && 3468 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3469 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3470 logged |= 4; 3471 } 3472 if (logged && 3473 (rack->rc_gp_incr == 0)){ 3474 /* Go into increment mode */ 3475 rack->rc_gp_incr = 1; 3476 rack->rc_gp_timely_inc_cnt = 0; 3477 } 3478 if (rack->rc_gp_incr && 3479 logged && 3480 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3481 rack->rc_gp_timely_inc_cnt++; 3482 } 3483 rack_log_timely(rack, logged, plus, 0, 0, 3484 __LINE__, 1); 3485 } 3486 3487 static uint32_t 3488 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3489 { 3490 /* 3491 * norm_grad = rtt_diff / minrtt; 3492 * new_per = curper * (1 - B * norm_grad) 3493 * 3494 * B = rack_gp_decrease_per (default 10%) 3495 * rtt_dif = input var current rtt-diff 3496 * curper = input var current percentage 3497 * minrtt = from rack filter 3498 * 3499 */ 3500 uint64_t perf; 3501 3502 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3503 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3504 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3505 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3506 (uint64_t)1000000)) / 3507 (uint64_t)1000000); 3508 if (perf > curper) { 3509 /* TSNH */ 3510 perf = curper - 1; 3511 } 3512 return ((uint32_t)perf); 3513 } 3514 3515 static uint32_t 3516 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3517 { 3518 /* 3519 * highrttthresh 3520 * result = curper * (1 - (B * ( 1 - ------ )) 3521 * gp_srtt 3522 * 3523 * B = rack_gp_decrease_per (default 10%) 3524 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3525 */ 3526 uint64_t perf; 3527 uint32_t 
highrttthresh; 3528 3529 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3530 3531 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3532 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3533 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3534 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3535 return (perf); 3536 } 3537 3538 static void 3539 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3540 { 3541 uint64_t logvar, logvar2, logvar3; 3542 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3543 3544 if (rack->rc_gp_incr) { 3545 /* Turn off increment counting */ 3546 rack->rc_gp_incr = 0; 3547 rack->rc_gp_timely_inc_cnt = 0; 3548 } 3549 ss_red = ca_red = rec_red = 0; 3550 logged = 0; 3551 /* Calculate the reduction value */ 3552 if (rtt_diff < 0) { 3553 rtt_diff *= -1; 3554 } 3555 /* Must be at least 1% reduction */ 3556 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3557 /* We have been in recovery ding it too */ 3558 if (timely_says == 2) { 3559 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3560 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3561 if (alt < new_per) 3562 val = alt; 3563 else 3564 val = new_per; 3565 } else 3566 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3567 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3568 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3569 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3570 } else { 3571 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3572 rec_red = 0; 3573 } 3574 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3575 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3576 logged |= 1; 3577 } 3578 if (rack->rc_gp_saw_ss) { 3579 /* Sent in SS */ 3580 if (timely_says == 2) { 3581 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3582 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3583 if (alt < new_per) 3584 val = alt; 3585 else 3586 val = new_per; 3587 } else 3588 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3589 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3590 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3591 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3592 } else { 3593 ss_red = new_per; 3594 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3595 logvar = new_per; 3596 logvar <<= 32; 3597 logvar |= alt; 3598 logvar2 = (uint32_t)rtt; 3599 logvar2 <<= 32; 3600 logvar2 |= (uint32_t)rtt_diff; 3601 logvar3 = rack_gp_rtt_maxmul; 3602 logvar3 <<= 32; 3603 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3604 rack_log_timely(rack, timely_says, 3605 logvar2, logvar3, 3606 logvar, __LINE__, 10); 3607 } 3608 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3609 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3610 logged |= 4; 3611 } else if (rack->rc_gp_saw_ca) { 3612 /* Sent in CA */ 3613 if (timely_says == 2) { 3614 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3615 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3616 if (alt < new_per) 3617 val = alt; 3618 else 3619 val = new_per; 3620 } else 3621 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3622 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3623 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3624 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3625 } else { 3626 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3627 ca_red = 0; 3628 logvar = new_per; 3629 logvar <<= 32; 3630 logvar |= alt; 3631 logvar2 = (uint32_t)rtt; 3632 logvar2 <<= 32; 3633 logvar2 |= (uint32_t)rtt_diff; 3634 logvar3 = rack_gp_rtt_maxmul; 3635 logvar3 <<= 32; 3636 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3637 rack_log_timely(rack, timely_says, 3638 logvar2, logvar3, 3639 logvar, __LINE__, 10); 3640 } 3641 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3642 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3643 logged |= 2; 3644 } 3645 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3646 rack->rc_gp_timely_dec_cnt++; 3647 if (rack_timely_dec_clear && 3648 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3649 rack->rc_gp_timely_dec_cnt = 0; 3650 } 3651 logvar = ss_red; 3652 logvar <<= 32; 3653 logvar |= ca_red; 3654 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3655 __LINE__, 2); 3656 } 3657 3658 static void 3659 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3660 uint32_t rtt, uint32_t line, uint8_t reas) 3661 { 3662 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3663 union tcp_log_stackspecific log; 3664 struct timeval tv; 3665 3666 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3667 log.u_bbr.flex1 = line; 3668 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3669 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3670 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3671 log.u_bbr.flex5 = rtt; 3672 log.u_bbr.flex6 = rack->rc_highly_buffered; 3673 log.u_bbr.flex6 <<= 1; 3674 log.u_bbr.flex6 |= rack->forced_ack; 3675 log.u_bbr.flex6 <<= 1; 3676 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3677 log.u_bbr.flex6 <<= 1; 3678 log.u_bbr.flex6 |= rack->in_probe_rtt; 3679 log.u_bbr.flex6 <<= 1; 3680 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3681 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3682 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3683 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3684 log.u_bbr.flex8 = reas; 3685 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3686 log.u_bbr.delRate = rack_get_bw(rack); 3687 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3688 log.u_bbr.cur_del_rate <<= 32; 3689 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3690 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3691 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3692 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3693 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3694 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3695 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3696 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3697 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3698 log.u_bbr.rttProp = us_cts; 3699 log.u_bbr.rttProp <<= 32; 3700 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3701 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3702 &rack->rc_inp->inp_socket->so_rcv, 3703 &rack->rc_inp->inp_socket->so_snd, 3704 BBR_LOG_RTT_SHRINKS, 0, 3705 0, &log, false, &rack->r_ctl.act_rcv_time); 3706 } 3707 } 3708 3709 static void 3710 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3711 { 3712 uint64_t bwdp; 3713 3714 bwdp = rack_get_bw(rack); 3715 bwdp *= (uint64_t)rtt; 3716 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3717 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3718 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3719 /* 3720 * A window protocol must be able to have 4 packets 3721 * outstanding as the floor in order to function 3722 * (especially considering delayed ack :D). 3723 */ 3724 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3725 } 3726 } 3727 3728 static void 3729 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3730 { 3731 /** 3732 * ProbeRTT is a bit different in rack_pacing than in 3733 * BBR. It is like BBR in that it uses the lowering of 3734 * the RTT as a signal that we saw something new and 3735 * counts from there for how long between. But it is 3736 * different in that its quite simple. It does not 3737 * play with the cwnd and wait until we get down 3738 * to N segments outstanding and hold that for 3739 * 200ms. Instead it just sets the pacing reduction 3740 * rate to a set percentage (70 by default) and hold 3741 * that for a number of recent GP Srtt's. 3742 */ 3743 uint32_t segsiz; 3744 3745 if (rack->rc_gp_dyn_mul == 0) 3746 return; 3747 3748 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3749 /* We are idle */ 3750 return; 3751 } 3752 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3753 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3754 /* 3755 * Stop the goodput now, the idea here is 3756 * that future measurements with in_probe_rtt 3757 * won't register if they are not greater so 3758 * we want to get what info (if any) is available 3759 * now. 3760 */ 3761 rack_do_goodput_measurement(rack->rc_tp, rack, 3762 rack->rc_tp->snd_una, __LINE__, 3763 RACK_QUALITY_PROBERTT); 3764 } 3765 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3766 rack->r_ctl.rc_time_probertt_entered = us_cts; 3767 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3768 rack->r_ctl.rc_pace_min_segs); 3769 rack->in_probe_rtt = 1; 3770 rack->measure_saw_probe_rtt = 1; 3771 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3772 rack->r_ctl.rc_time_probertt_starts = 0; 3773 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3774 if (rack_probertt_use_min_rtt_entry) 3775 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3776 else 3777 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3778 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3779 __LINE__, RACK_RTTS_ENTERPROBE); 3780 } 3781 3782 static void 3783 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3784 { 3785 struct rack_sendmap *rsm; 3786 uint32_t segsiz; 3787 3788 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3789 rack->r_ctl.rc_pace_min_segs); 3790 rack->in_probe_rtt = 0; 3791 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3792 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3793 /* 3794 * Stop the goodput now, the idea here is 3795 * that future measurements with in_probe_rtt 3796 * won't register if they are not greater so 3797 * we want to get what info (if any) is available 3798 * now. 3799 */ 3800 rack_do_goodput_measurement(rack->rc_tp, rack, 3801 rack->rc_tp->snd_una, __LINE__, 3802 RACK_QUALITY_PROBERTT); 3803 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3804 /* 3805 * We don't have enough data to make a measurement. 3806 * So lets just stop and start here after exiting 3807 * probe-rtt. We probably are not interested in 3808 * the results anyway. 3809 */ 3810 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3811 } 3812 /* 3813 * Measurements through the current snd_max are going 3814 * to be limited by the slower pacing rate. 
3815 * 3816 * We need to mark these as app-limited so we 3817 * don't collapse the b/w. 3818 */ 3819 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3820 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3821 if (rack->r_ctl.rc_app_limited_cnt == 0) 3822 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3823 else { 3824 /* 3825 * Go out to the end app limited and mark 3826 * this new one as next and move the end_appl up 3827 * to this guy. 3828 */ 3829 if (rack->r_ctl.rc_end_appl) 3830 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3831 rack->r_ctl.rc_end_appl = rsm; 3832 } 3833 rsm->r_flags |= RACK_APP_LIMITED; 3834 rack->r_ctl.rc_app_limited_cnt++; 3835 } 3836 /* 3837 * Now, we need to examine our pacing rate multipliers. 3838 * If its under 100%, we need to kick it back up to 3839 * 100%. We also don't let it be over our "max" above 3840 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3841 * Note setting clamp_atexit_prtt to 0 has the effect 3842 * of setting CA/SS to 100% always at exit (which is 3843 * the default behavior). 3844 */ 3845 if (rack_probertt_clear_is) { 3846 rack->rc_gp_incr = 0; 3847 rack->rc_gp_bwred = 0; 3848 rack->rc_gp_timely_inc_cnt = 0; 3849 rack->rc_gp_timely_dec_cnt = 0; 3850 } 3851 /* Do we do any clamping at exit? */ 3852 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3853 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3854 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3855 } 3856 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3857 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3858 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3859 } 3860 /* 3861 * Lets set rtt_diff to 0, so that we will get a "boost" 3862 * after exiting. 3863 */ 3864 rack->r_ctl.rc_rtt_diff = 0; 3865 3866 /* Clear all flags so we start fresh */ 3867 rack->rc_tp->t_bytes_acked = 0; 3868 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3869 /* 3870 * If configured to, set the cwnd and ssthresh to 3871 * our targets. 3872 */ 3873 if (rack_probe_rtt_sets_cwnd) { 3874 uint64_t ebdp; 3875 uint32_t setto; 3876 3877 /* Set ssthresh so we get into CA once we hit our target */ 3878 if (rack_probertt_use_min_rtt_exit == 1) { 3879 /* Set to min rtt */ 3880 rack_set_prtt_target(rack, segsiz, 3881 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3882 } else if (rack_probertt_use_min_rtt_exit == 2) { 3883 /* Set to current gp rtt */ 3884 rack_set_prtt_target(rack, segsiz, 3885 rack->r_ctl.rc_gp_srtt); 3886 } else if (rack_probertt_use_min_rtt_exit == 3) { 3887 /* Set to entry gp rtt */ 3888 rack_set_prtt_target(rack, segsiz, 3889 rack->r_ctl.rc_entry_gp_rtt); 3890 } else { 3891 uint64_t sum; 3892 uint32_t setval; 3893 3894 sum = rack->r_ctl.rc_entry_gp_rtt; 3895 sum *= 10; 3896 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3897 if (sum >= 20) { 3898 /* 3899 * A highly buffered path needs 3900 * cwnd space for timely to work. 3901 * Lets set things up as if 3902 * we are heading back here again. 3903 */ 3904 setval = rack->r_ctl.rc_entry_gp_rtt; 3905 } else if (sum >= 15) { 3906 /* 3907 * Lets take the smaller of the 3908 * two since we are just somewhat 3909 * buffered. 3910 */ 3911 setval = rack->r_ctl.rc_gp_srtt; 3912 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3913 setval = rack->r_ctl.rc_entry_gp_rtt; 3914 } else { 3915 /* 3916 * Here we are not highly buffered 3917 * and should pick the min we can to 3918 * keep from causing loss. 
3919 */ 3920 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3921 } 3922 rack_set_prtt_target(rack, segsiz, 3923 setval); 3924 } 3925 if (rack_probe_rtt_sets_cwnd > 1) { 3926 /* There is a percentage here to boost */ 3927 ebdp = rack->r_ctl.rc_target_probertt_flight; 3928 ebdp *= rack_probe_rtt_sets_cwnd; 3929 ebdp /= 100; 3930 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3931 } else 3932 setto = rack->r_ctl.rc_target_probertt_flight; 3933 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3934 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3935 /* Enforce a min */ 3936 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3937 } 3938 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3939 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3940 } 3941 rack_log_rtt_shrinks(rack, us_cts, 3942 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3943 __LINE__, RACK_RTTS_EXITPROBE); 3944 /* Clear times last so log has all the info */ 3945 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3946 rack->r_ctl.rc_time_probertt_entered = us_cts; 3947 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3948 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3949 } 3950 3951 static void 3952 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3953 { 3954 /* Check in on probe-rtt */ 3955 if (rack->rc_gp_filled == 0) { 3956 /* We do not do p-rtt unless we have gp measurements */ 3957 return; 3958 } 3959 if (rack->in_probe_rtt) { 3960 uint64_t no_overflow; 3961 uint32_t endtime, must_stay; 3962 3963 if (rack->r_ctl.rc_went_idle_time && 3964 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3965 /* 3966 * We went idle during prtt, just exit now. 3967 */ 3968 rack_exit_probertt(rack, us_cts); 3969 } else if (rack_probe_rtt_safety_val && 3970 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3971 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3972 /* 3973 * Probe RTT safety value triggered! 3974 */ 3975 rack_log_rtt_shrinks(rack, us_cts, 3976 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3977 __LINE__, RACK_RTTS_SAFETY); 3978 rack_exit_probertt(rack, us_cts); 3979 } 3980 /* Calculate the max we will wait */ 3981 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3982 if (rack->rc_highly_buffered) 3983 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3984 /* Calculate the min we must wait */ 3985 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3986 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3987 TSTMP_LT(us_cts, endtime)) { 3988 uint32_t calc; 3989 /* Do we lower more? 
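 * Each full gp_srtt we have spent in probe-rtt shaves another
 * rack_per_of_gp_probertt_reduce percentage points off the probe-rtt
 * pacing percentage, floored at rack_per_of_gp_lowthresh.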
*/ 3990 no_exit: 3991 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3992 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3993 else 3994 calc = 0; 3995 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3996 if (calc) { 3997 /* Maybe */ 3998 calc *= rack_per_of_gp_probertt_reduce; 3999 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 4000 /* Limit it too */ 4001 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 4002 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 4003 } 4004 /* We must reach target or the time set */ 4005 return; 4006 } 4007 if (rack->r_ctl.rc_time_probertt_starts == 0) { 4008 if ((TSTMP_LT(us_cts, must_stay) && 4009 rack->rc_highly_buffered) || 4010 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 4011 rack->r_ctl.rc_target_probertt_flight)) { 4012 /* We are not past the must_stay time */ 4013 goto no_exit; 4014 } 4015 rack_log_rtt_shrinks(rack, us_cts, 4016 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4017 __LINE__, RACK_RTTS_REACHTARGET); 4018 rack->r_ctl.rc_time_probertt_starts = us_cts; 4019 if (rack->r_ctl.rc_time_probertt_starts == 0) 4020 rack->r_ctl.rc_time_probertt_starts = 1; 4021 /* Restore back to our rate we want to pace at in prtt */ 4022 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 4023 } 4024 /* 4025 * Setup our end time, some number of gp_srtts plus 200ms. 4026 */ 4027 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4028 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4029 if (rack_probertt_gpsrtt_cnt_div) 4030 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4031 else 4032 endtime = 0; 4033 endtime += rack_min_probertt_hold; 4034 endtime += rack->r_ctl.rc_time_probertt_starts; 4035 if (TSTMP_GEQ(us_cts, endtime)) { 4036 /* yes, exit probertt */ 4037 rack_exit_probertt(rack, us_cts); 4038 } 4039 4040 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 4041 /* Go into probertt, its been too long since we went lower */ 4042 rack_enter_probertt(rack, us_cts); 4043 } 4044 } 4045 4046 static void 4047 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4048 uint32_t rtt, int32_t rtt_diff) 4049 { 4050 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4051 uint32_t losses; 4052 4053 if ((rack->rc_gp_dyn_mul == 0) || 4054 (rack->use_fixed_rate) || 4055 (rack->in_probe_rtt) || 4056 (rack->rc_always_pace == 0)) { 4057 /* No dynamic GP multipler in play */ 4058 return; 4059 } 4060 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4061 cur_bw = rack_get_bw(rack); 4062 /* Calculate our up and down range */ 4063 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4064 up_bnd /= 100; 4065 up_bnd += rack->r_ctl.last_gp_comp_bw; 4066 4067 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4068 subfr /= 100; 4069 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4070 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4071 /* 4072 * This is the case where our RTT is above 4073 * the max target and we have been configured 4074 * to just do timely no bonus up stuff in that case. 4075 * 4076 * There are two configurations, set to 1, and we 4077 * just do timely if we are over our max. If its 4078 * set above 1 then we slam the multipliers down 4079 * to 100 and then decrement per timely. 
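 * (That is, a setting of 1 just applies the timely decrease below,
 * while anything larger first clamps the SS/CA multipliers to 100
 * and then applies the decrease.)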
4080 */ 4081 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4082 __LINE__, 3); 4083 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4084 rack_validate_multipliers_at_or_below_100(rack); 4085 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4086 } else if ((last_bw_est < low_bnd) && !losses) { 4087 /* 4088 * We are decreasing this is a bit complicated this 4089 * means we are loosing ground. This could be 4090 * because another flow entered and we are competing 4091 * for b/w with it. This will push the RTT up which 4092 * makes timely unusable unless we want to get shoved 4093 * into a corner and just be backed off (the age 4094 * old problem with delay based CC). 4095 * 4096 * On the other hand if it was a route change we 4097 * would like to stay somewhat contained and not 4098 * blow out the buffers. 4099 */ 4100 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4101 __LINE__, 3); 4102 rack->r_ctl.last_gp_comp_bw = cur_bw; 4103 if (rack->rc_gp_bwred == 0) { 4104 /* Go into reduction counting */ 4105 rack->rc_gp_bwred = 1; 4106 rack->rc_gp_timely_dec_cnt = 0; 4107 } 4108 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 4109 (timely_says == 0)) { 4110 /* 4111 * Push another time with a faster pacing 4112 * to try to gain back (we include override to 4113 * get a full raise factor). 4114 */ 4115 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4116 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4117 (timely_says == 0) || 4118 (rack_down_raise_thresh == 0)) { 4119 /* 4120 * Do an override up in b/w if we were 4121 * below the threshold or if the threshold 4122 * is zero we always do the raise. 4123 */ 4124 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4125 } else { 4126 /* Log it stays the same */ 4127 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4128 __LINE__, 11); 4129 } 4130 rack->rc_gp_timely_dec_cnt++; 4131 /* We are not incrementing really no-count */ 4132 rack->rc_gp_incr = 0; 4133 rack->rc_gp_timely_inc_cnt = 0; 4134 } else { 4135 /* 4136 * Lets just use the RTT 4137 * information and give up 4138 * pushing. 4139 */ 4140 goto use_timely; 4141 } 4142 } else if ((timely_says != 2) && 4143 !losses && 4144 (last_bw_est > up_bnd)) { 4145 /* 4146 * We are increasing b/w lets keep going, updating 4147 * our b/w and ignoring any timely input, unless 4148 * of course we are at our max raise (if there is one). 4149 */ 4150 4151 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4152 __LINE__, 3); 4153 rack->r_ctl.last_gp_comp_bw = cur_bw; 4154 if (rack->rc_gp_saw_ss && 4155 rack_per_upper_bound_ss && 4156 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 4157 /* 4158 * In cases where we can't go higher 4159 * we should just use timely. 4160 */ 4161 goto use_timely; 4162 } 4163 if (rack->rc_gp_saw_ca && 4164 rack_per_upper_bound_ca && 4165 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 4166 /* 4167 * In cases where we can't go higher 4168 * we should just use timely. 
			     */
			    goto use_timely;
		}
		rack->rc_gp_bwred = 0;
		rack->rc_gp_timely_dec_cnt = 0;
		/* You get a set number of pushes if timely is trying to reduce */
		if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
		} else {
			/* Log it stays the same */
			rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
			    __LINE__, 12);
		}
		return;
	} else {
		/*
		 * We are staying between the lower and upper range bounds
		 * so use timely to decide.
		 */
		rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
				__LINE__, 3);
use_timely:
		if (timely_says) {
			rack->rc_gp_incr = 0;
			rack->rc_gp_timely_inc_cnt = 0;
			if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
			    !losses &&
			    (last_bw_est < low_bnd)) {
				/* We are losing ground */
				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
				rack->rc_gp_timely_dec_cnt++;
				/* We are not incrementing really no-count */
				rack->rc_gp_incr = 0;
				rack->rc_gp_timely_inc_cnt = 0;
			} else
				rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
		} else {
			rack->rc_gp_bwred = 0;
			rack->rc_gp_timely_dec_cnt = 0;
			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
		}
	}
}

static int32_t
rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
{
	int32_t timely_says;
	uint64_t log_mult, log_rtt_a_diff;

	log_rtt_a_diff = rtt;
	log_rtt_a_diff <<= 32;
	log_rtt_a_diff |= (uint32_t)rtt_diff;
	if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
		    rack_gp_rtt_maxmul)) {
		/* Reduce the b/w multiplier */
		timely_says = 2;
		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
		log_mult <<= 32;
		log_mult |= prev_rtt;
		rack_log_timely(rack, timely_says, log_mult,
				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
				log_rtt_a_diff, __LINE__, 4);
	} else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
			   ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
			    max(rack_gp_rtt_mindiv, 1)))) {
		/* Increase the b/w multiplier */
		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
			((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
			 max(rack_gp_rtt_mindiv, 1));
		log_mult <<= 32;
		log_mult |= prev_rtt;
		timely_says = 0;
		rack_log_timely(rack, timely_says, log_mult,
				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
				log_rtt_a_diff, __LINE__, 5);
	} else {
		/*
		 * Use a gradient to find it; the timely gradient
		 * is:
		 * grad = rc_rtt_diff / min_rtt;
		 *
		 * anything below or equal to 0 will be
		 * an increase indication. Anything above
		 * zero is a decrease. Note we take care
		 * of the actual gradient calculation
		 * in the reduction (it's not needed for
		 * the increase).
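		 * For example, a smoothed rtt_diff of +2000 usec against a
		 * 20000 usec min_rtt is a 10% gradient; with the 10% default
		 * of rack_gp_decrease_per noted above that becomes roughly a
		 * 1% cut of the current multiplier in rack_get_decrease()
		 * (new_per = curper * (1 - 0.10 * 0.10)).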
4257 */ 4258 log_mult = prev_rtt; 4259 if (rtt_diff <= 0) { 4260 /* 4261 * Rttdiff is less than zero, increase the 4262 * b/w multipler (its 0 or negative) 4263 */ 4264 timely_says = 0; 4265 rack_log_timely(rack, timely_says, log_mult, 4266 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4267 } else { 4268 /* Reduce the b/w multipler */ 4269 timely_says = 1; 4270 rack_log_timely(rack, timely_says, log_mult, 4271 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4272 } 4273 } 4274 return (timely_says); 4275 } 4276 4277 static void 4278 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4279 tcp_seq th_ack, int line, uint8_t quality) 4280 { 4281 uint64_t tim, bytes_ps, ltim, stim, utim; 4282 uint32_t segsiz, bytes, reqbytes, us_cts; 4283 int32_t gput, new_rtt_diff, timely_says; 4284 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4285 int did_add = 0; 4286 4287 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4288 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4289 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4290 tim = us_cts - tp->gput_ts; 4291 else 4292 tim = 0; 4293 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4294 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4295 else 4296 stim = 0; 4297 /* 4298 * Use the larger of the send time or ack time. This prevents us 4299 * from being influenced by ack artifacts to come up with too 4300 * high of measurement. Note that since we are spanning over many more 4301 * bytes in most of our measurements hopefully that is less likely to 4302 * occur. 4303 */ 4304 if (tim > stim) 4305 utim = max(tim, 1); 4306 else 4307 utim = max(stim, 1); 4308 /* Lets get a msec time ltim too for the old stuff */ 4309 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4310 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4311 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4312 if ((tim == 0) && (stim == 0)) { 4313 /* 4314 * Invalid measurement time, maybe 4315 * all on one ack/one send? 4316 */ 4317 bytes = 0; 4318 bytes_ps = 0; 4319 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4320 0, 0, 0, 10, __LINE__, NULL, quality); 4321 goto skip_measurement; 4322 } 4323 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4324 /* We never made a us_rtt measurement? */ 4325 bytes = 0; 4326 bytes_ps = 0; 4327 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4328 0, 0, 0, 10, __LINE__, NULL, quality); 4329 goto skip_measurement; 4330 } 4331 /* 4332 * Calculate the maximum possible b/w this connection 4333 * could have. We base our calculation on the lowest 4334 * rtt we have seen during the measurement and the 4335 * largest rwnd the client has given us in that time. This 4336 * forms a BDP that is the maximum that we could ever 4337 * get to the client. Anything larger is not valid. 4338 * 4339 * I originally had code here that rejected measurements 4340 * where the time was less than 1/2 the latest us_rtt. 4341 * But after thinking on that I realized its wrong since 4342 * say you had a 150Mbps or even 1Gbps link, and you 4343 * were a long way away.. example I am in Europe (100ms rtt) 4344 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4345 * bytes my time would be 1.2ms, and yet my rtt would say 4346 * the measurement was invalid the time was < 50ms. The 4347 * same thing is true for 150Mb (8ms of time). 
4348 * 4349 * A better way I realized is to look at what the maximum 4350 * the connection could possibly do. This is gated on 4351 * the lowest RTT we have seen and the highest rwnd. 4352 * We should in theory never exceed that, if we are 4353 * then something on the path is storing up packets 4354 * and then feeding them all at once to our endpoint 4355 * messing up our measurement. 4356 */ 4357 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4358 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4359 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4360 if (SEQ_LT(th_ack, tp->gput_seq)) { 4361 /* No measurement can be made */ 4362 bytes = 0; 4363 bytes_ps = 0; 4364 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4365 0, 0, 0, 10, __LINE__, NULL, quality); 4366 goto skip_measurement; 4367 } else 4368 bytes = (th_ack - tp->gput_seq); 4369 bytes_ps = (uint64_t)bytes; 4370 /* 4371 * Don't measure a b/w for pacing unless we have gotten at least 4372 * an initial windows worth of data in this measurement interval. 4373 * 4374 * Small numbers of bytes get badly influenced by delayed ack and 4375 * other artifacts. Note we take the initial window or our 4376 * defined minimum GP (defaulting to 10 which hopefully is the 4377 * IW). 4378 */ 4379 if (rack->rc_gp_filled == 0) { 4380 /* 4381 * The initial estimate is special. We 4382 * have blasted out an IW worth of packets 4383 * without a real valid ack ts results. We 4384 * then setup the app_limited_needs_set flag, 4385 * this should get the first ack in (probably 2 4386 * MSS worth) to be recorded as the timestamp. 4387 * We thus allow a smaller number of bytes i.e. 4388 * IW - 2MSS. 4389 */ 4390 reqbytes -= (2 * segsiz); 4391 /* Also lets fill previous for our first measurement to be neutral */ 4392 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4393 } 4394 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4395 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4396 rack->r_ctl.rc_app_limited_cnt, 4397 0, 0, 10, __LINE__, NULL, quality); 4398 goto skip_measurement; 4399 } 4400 /* 4401 * We now need to calculate the Timely like status so 4402 * we can update (possibly) the b/w multipliers. 4403 */ 4404 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4405 if (rack->rc_gp_filled == 0) { 4406 /* No previous reading */ 4407 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4408 } else { 4409 if (rack->measure_saw_probe_rtt == 0) { 4410 /* 4411 * We don't want a probertt to be counted 4412 * since it will be negative incorrectly. We 4413 * expect to be reducing the RTT when we 4414 * pace at a slower rate. 4415 */ 4416 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4417 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4418 } 4419 } 4420 timely_says = rack_make_timely_judgement(rack, 4421 rack->r_ctl.rc_gp_srtt, 4422 rack->r_ctl.rc_rtt_diff, 4423 rack->r_ctl.rc_prev_gp_srtt 4424 ); 4425 bytes_ps *= HPTS_USEC_IN_SEC; 4426 bytes_ps /= utim; 4427 if (bytes_ps > rack->r_ctl.last_max_bw) { 4428 /* 4429 * Something is on path playing 4430 * since this b/w is not possible based 4431 * on our BDP (highest rwnd and lowest rtt 4432 * we saw in the measurement window). 4433 * 4434 * Another option here would be to 4435 * instead skip the measurement. 
		 */
		rack_log_pacing_delay_calc(rack, bytes, reqbytes,
					   bytes_ps, rack->r_ctl.last_max_bw, 0,
					   11, __LINE__, NULL, quality);
		bytes_ps = rack->r_ctl.last_max_bw;
	}
	/* We store gp for b/w in bytes per second */
	if (rack->rc_gp_filled == 0) {
		/* Initial measurement */
		if (bytes_ps) {
			rack->r_ctl.gp_bw = bytes_ps;
			rack->rc_gp_filled = 1;
			rack->r_ctl.num_measurements = 1;
			rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
		} else {
			rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
						   rack->r_ctl.rc_app_limited_cnt,
						   0, 0, 10, __LINE__, NULL, quality);
		}
		if (tcp_in_hpts(rack->rc_inp) &&
		    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
			/*
			 * Ok we can't trust the pacer in this case
			 * where we transition from un-paced to paced.
			 * Or for that matter when the burst mitigation
			 * was making a wild guess and got it wrong.
			 * Stop the pacer and clear up all the aggregate
			 * delays etc.
			 */
			tcp_hpts_remove(rack->rc_inp);
			rack->r_ctl.rc_hpts_flags = 0;
			rack->r_ctl.rc_last_output_to = 0;
		}
		did_add = 2;
	} else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
		/* Still a small number, run an average */
		rack->r_ctl.gp_bw += bytes_ps;
		addpart = rack->r_ctl.num_measurements;
		rack->r_ctl.num_measurements++;
		if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
			/* We have collected enough to move forward */
			rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
		}
		did_add = 3;
	} else {
		/*
		 * We want to take 1/wma of the goodput and add in to 7/8th
		 * of the old value weighted by the srtt. So if your measurement
		 * period is say 2 SRTT's long you would get 1/4 as the
		 * value, if it was like 1/2 SRTT then you would get 1/16th.
		 *
		 * But we must be careful not to take too much i.e. if the
		 * srtt is say 20ms and the measurement is taken over
		 * 400ms our weight would be 400/20 i.e. 20. On the
		 * other hand if we get a measurement over 1ms with a
		 * 10ms rtt we only want to take a much smaller portion.
		 */
		if (rack->r_ctl.num_measurements < 0xff) {
			rack->r_ctl.num_measurements++;
		}
		srtt = (uint64_t)tp->t_srtt;
		if (srtt == 0) {
			/*
			 * Strange, why did t_srtt go back to zero?
			 */
			if (rack->r_ctl.rc_rack_min_rtt)
				srtt = rack->r_ctl.rc_rack_min_rtt;
			else
				srtt = HPTS_USEC_IN_MSEC;
		}
		/*
		 * XXXrrs: Note for reviewers, in playing with
		 * dynamic pacing I discovered this GP calculation
		 * as done originally leads to some undesired results.
		 * Basically you can get longer measurements contributing
		 * too much to the WMA. Thus I changed it so that if you are
		 * doing dynamic adjustments we only do the apportioned
		 * adjustment when we have a very small (time wise) measurement.
		 * Longer measurements just get their weight (defaulting to 1/8)
		 * added to the WMA. We may want to think about changing
		 * this to always do that for both sides i.e. dynamic
		 * and non-dynamic... but considering lots of folks
		 * were playing with this I did not want to change the
		 * calculation per se without your thoughts.. Lawrence?
		 * Peter??
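		 * For example, with the default 1/8 weight a sample that
		 * spans half an SRTT only displaces about 1/16 of the
		 * running gp_bw, while a sample of one SRTT or longer
		 * displaces the full 1/8.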
4521 */ 4522 if (rack->rc_gp_dyn_mul == 0) { 4523 subpart = rack->r_ctl.gp_bw * utim; 4524 subpart /= (srtt * 8); 4525 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4526 /* 4527 * The b/w update takes no more 4528 * away then 1/2 our running total 4529 * so factor it in. 4530 */ 4531 addpart = bytes_ps * utim; 4532 addpart /= (srtt * 8); 4533 } else { 4534 /* 4535 * Don't allow a single measurement 4536 * to account for more than 1/2 of the 4537 * WMA. This could happen on a retransmission 4538 * where utim becomes huge compared to 4539 * srtt (multiple retransmissions when using 4540 * the sending rate which factors in all the 4541 * transmissions from the first one). 4542 */ 4543 subpart = rack->r_ctl.gp_bw / 2; 4544 addpart = bytes_ps / 2; 4545 } 4546 resid_bw = rack->r_ctl.gp_bw - subpart; 4547 rack->r_ctl.gp_bw = resid_bw + addpart; 4548 did_add = 1; 4549 } else { 4550 if ((utim / srtt) <= 1) { 4551 /* 4552 * The b/w update was over a small period 4553 * of time. The idea here is to prevent a small 4554 * measurement time period from counting 4555 * too much. So we scale it based on the 4556 * time so it attributes less than 1/rack_wma_divisor 4557 * of its measurement. 4558 */ 4559 subpart = rack->r_ctl.gp_bw * utim; 4560 subpart /= (srtt * rack_wma_divisor); 4561 addpart = bytes_ps * utim; 4562 addpart /= (srtt * rack_wma_divisor); 4563 } else { 4564 /* 4565 * The scaled measurement was long 4566 * enough so lets just add in the 4567 * portion of the measurement i.e. 1/rack_wma_divisor 4568 */ 4569 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4570 addpart = bytes_ps / rack_wma_divisor; 4571 } 4572 if ((rack->measure_saw_probe_rtt == 0) || 4573 (bytes_ps > rack->r_ctl.gp_bw)) { 4574 /* 4575 * For probe-rtt we only add it in 4576 * if its larger, all others we just 4577 * add in. 4578 */ 4579 did_add = 1; 4580 resid_bw = rack->r_ctl.gp_bw - subpart; 4581 rack->r_ctl.gp_bw = resid_bw + addpart; 4582 } 4583 } 4584 } 4585 if ((rack->gp_ready == 0) && 4586 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4587 /* We have enough measurements now */ 4588 rack->gp_ready = 1; 4589 rack_set_cc_pacing(rack); 4590 if (rack->defer_options) 4591 rack_apply_deferred_options(rack); 4592 } 4593 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4594 rack_get_bw(rack), 22, did_add, NULL, quality); 4595 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4596 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4597 rack_update_multiplier(rack, timely_says, bytes_ps, 4598 rack->r_ctl.rc_gp_srtt, 4599 rack->r_ctl.rc_rtt_diff); 4600 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4601 rack_get_bw(rack), 3, line, NULL, quality); 4602 /* reset the gp srtt and setup the new prev */ 4603 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4604 /* Record the lost count for the next measurement */ 4605 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4606 /* 4607 * We restart our diffs based on the gpsrtt in the 4608 * measurement window. 4609 */ 4610 rack->rc_gp_rtt_set = 0; 4611 rack->rc_gp_saw_rec = 0; 4612 rack->rc_gp_saw_ca = 0; 4613 rack->rc_gp_saw_ss = 0; 4614 rack->rc_dragged_bottom = 0; 4615 skip_measurement: 4616 4617 #ifdef STATS 4618 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4619 gput); 4620 /* 4621 * XXXLAS: This is a temporary hack, and should be 4622 * chained off VOI_TCP_GPUT when stats(9) grows an 4623 * API to deal with chained VOIs. 
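 * (The VOI_TCP_GPUT_ND sample recorded below is simply the percent
 * change of this goodput reading relative to the previous one.)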
4624 */ 4625 if (tp->t_stats_gput_prev > 0) 4626 stats_voi_update_abs_s32(tp->t_stats, 4627 VOI_TCP_GPUT_ND, 4628 ((gput - tp->t_stats_gput_prev) * 100) / 4629 tp->t_stats_gput_prev); 4630 #endif 4631 tp->t_flags &= ~TF_GPUTINPROG; 4632 tp->t_stats_gput_prev = gput; 4633 /* 4634 * Now are we app limited now and there is space from where we 4635 * were to where we want to go? 4636 * 4637 * We don't do the other case i.e. non-applimited here since 4638 * the next send will trigger us picking up the missing data. 4639 */ 4640 if (rack->r_ctl.rc_first_appl && 4641 TCPS_HAVEESTABLISHED(tp->t_state) && 4642 rack->r_ctl.rc_app_limited_cnt && 4643 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4644 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4645 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4646 /* 4647 * Yep there is enough outstanding to make a measurement here. 4648 */ 4649 struct rack_sendmap *rsm, fe; 4650 4651 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4652 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4653 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4654 rack->app_limited_needs_set = 0; 4655 tp->gput_seq = th_ack; 4656 if (rack->in_probe_rtt) 4657 rack->measure_saw_probe_rtt = 1; 4658 else if ((rack->measure_saw_probe_rtt) && 4659 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4660 rack->measure_saw_probe_rtt = 0; 4661 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4662 /* There is a full window to gain info from */ 4663 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4664 } else { 4665 /* We can only measure up to the applimited point */ 4666 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4667 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4668 /* 4669 * We don't have enough to make a measurement. 4670 */ 4671 tp->t_flags &= ~TF_GPUTINPROG; 4672 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4673 0, 0, 0, 6, __LINE__, NULL, quality); 4674 return; 4675 } 4676 } 4677 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4678 /* 4679 * We will get no more data into the SB 4680 * this means we need to have the data available 4681 * before we start a measurement. 4682 */ 4683 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4684 /* Nope not enough data. */ 4685 return; 4686 } 4687 } 4688 tp->t_flags |= TF_GPUTINPROG; 4689 /* 4690 * Now we need to find the timestamp of the send at tp->gput_seq 4691 * for the send based measurement. 4692 */ 4693 fe.r_start = tp->gput_seq; 4694 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4695 if (rsm) { 4696 /* Ok send-based limit is set */ 4697 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4698 /* 4699 * Move back to include the earlier part 4700 * so our ack time lines up right (this may 4701 * make an overlapping measurement but thats 4702 * ok). 4703 */ 4704 tp->gput_seq = rsm->r_start; 4705 } 4706 if (rsm->r_flags & RACK_ACKED) 4707 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4708 else 4709 rack->app_limited_needs_set = 1; 4710 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4711 } else { 4712 /* 4713 * If we don't find the rsm due to some 4714 * send-limit set the current time, which 4715 * basically disables the send-limit. 
4716 */ 4717 struct timeval tv; 4718 4719 microuptime(&tv); 4720 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4721 } 4722 rack_log_pacing_delay_calc(rack, 4723 tp->gput_seq, 4724 tp->gput_ack, 4725 (uint64_t)rsm, 4726 tp->gput_ts, 4727 rack->r_ctl.rc_app_limited_cnt, 4728 9, 4729 __LINE__, NULL, quality); 4730 } 4731 } 4732 4733 /* 4734 * CC wrapper hook functions 4735 */ 4736 static void 4737 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4738 uint16_t type, int32_t recovery) 4739 { 4740 uint32_t prior_cwnd, acked; 4741 struct tcp_log_buffer *lgb = NULL; 4742 uint8_t labc_to_use, quality; 4743 4744 INP_WLOCK_ASSERT(tp->t_inpcb); 4745 tp->ccv->nsegs = nsegs; 4746 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4747 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4748 uint32_t max; 4749 4750 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4751 if (tp->ccv->bytes_this_ack > max) { 4752 tp->ccv->bytes_this_ack = max; 4753 } 4754 } 4755 #ifdef STATS 4756 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4757 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4758 #endif 4759 quality = RACK_QUALITY_NONE; 4760 if ((tp->t_flags & TF_GPUTINPROG) && 4761 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4762 /* Measure the Goodput */ 4763 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4764 #ifdef NETFLIX_PEAKRATE 4765 if ((type == CC_ACK) && 4766 (tp->t_maxpeakrate)) { 4767 /* 4768 * We update t_peakrate_thr. This gives us roughly 4769 * one update per round trip time. Note 4770 * it will only be used if pace_always is off i.e 4771 * we don't do this for paced flows. 4772 */ 4773 rack_update_peakrate_thr(tp); 4774 } 4775 #endif 4776 } 4777 /* Which way our we limited, if not cwnd limited no advance in CA */ 4778 if (tp->snd_cwnd <= tp->snd_wnd) 4779 tp->ccv->flags |= CCF_CWND_LIMITED; 4780 else 4781 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4782 if (tp->snd_cwnd > tp->snd_ssthresh) { 4783 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4784 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4785 /* For the setting of a window past use the actual scwnd we are using */ 4786 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4787 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4788 tp->ccv->flags |= CCF_ABC_SENTAWND; 4789 } 4790 } else { 4791 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4792 tp->t_bytes_acked = 0; 4793 } 4794 prior_cwnd = tp->snd_cwnd; 4795 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4796 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4797 labc_to_use = rack->rc_labc; 4798 else 4799 labc_to_use = rack_max_abc_post_recovery; 4800 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4801 union tcp_log_stackspecific log; 4802 struct timeval tv; 4803 4804 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4805 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4806 log.u_bbr.flex1 = th_ack; 4807 log.u_bbr.flex2 = tp->ccv->flags; 4808 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4809 log.u_bbr.flex4 = tp->ccv->nsegs; 4810 log.u_bbr.flex5 = labc_to_use; 4811 log.u_bbr.flex6 = prior_cwnd; 4812 log.u_bbr.flex7 = V_tcp_do_newsack; 4813 log.u_bbr.flex8 = 1; 4814 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4815 0, &log, false, NULL, NULL, 0, &tv); 4816 } 4817 if (CC_ALGO(tp)->ack_received != NULL) { 4818 /* XXXLAS: Find a way to live without this */ 4819 tp->ccv->curack = th_ack; 4820 
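/*
 * Hand the CC module the ABC limit chosen above for this ack: the two
 * lines below stash it in ccv->labc and set CCF_USE_LOCAL_ABC, which
 * asks the module (e.g. newreno) to honor that value in place of its
 * usual global abc_l_var limit.
 */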
tp->ccv->labc = labc_to_use; 4821 tp->ccv->flags |= CCF_USE_LOCAL_ABC; 4822 CC_ALGO(tp)->ack_received(tp->ccv, type); 4823 } 4824 if (lgb) { 4825 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4826 } 4827 if (rack->r_must_retran) { 4828 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4829 /* 4830 * We are now beyond the rxt point so let's disable 4831 * the flag. 4832 */ 4833 rack->r_ctl.rc_out_at_rto = 0; 4834 rack->r_must_retran = 0; 4835 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4836 /* 4837 * Only decrement rc_out_at_rto if the cwnd advances 4838 * by at least a whole segment. Otherwise next time the peer 4839 * acks, we won't be able to send. This generally happens 4840 * when we are in Congestion Avoidance. 4841 */ 4842 if (acked <= rack->r_ctl.rc_out_at_rto){ 4843 rack->r_ctl.rc_out_at_rto -= acked; 4844 } else { 4845 rack->r_ctl.rc_out_at_rto = 0; 4846 } 4847 } 4848 } 4849 #ifdef STATS 4850 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4851 #endif 4852 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4853 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4854 } 4855 #ifdef NETFLIX_PEAKRATE 4856 /* we enforce max peak rate if it is set and we are not pacing */ 4857 if ((rack->rc_always_pace == 0) && 4858 tp->t_peakrate_thr && 4859 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4860 tp->snd_cwnd = tp->t_peakrate_thr; 4861 } 4862 #endif 4863 } 4864 4865 static void 4866 tcp_rack_partialack(struct tcpcb *tp) 4867 { 4868 struct tcp_rack *rack; 4869 4870 rack = (struct tcp_rack *)tp->t_fb_ptr; 4871 INP_WLOCK_ASSERT(tp->t_inpcb); 4872 /* 4873 * If we are doing PRR and have enough 4874 * room to send <or> we are pacing and prr 4875 * is disabled, we will want to see if we 4876 * can send data (by setting r_wanted_output to 4877 * true). 4878 */ 4879 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4880 rack->rack_no_prr) 4881 rack->r_wanted_output = 1; 4882 } 4883 4884 static void 4885 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4886 { 4887 struct tcp_rack *rack; 4888 uint32_t orig_cwnd; 4889 4890 orig_cwnd = tp->snd_cwnd; 4891 INP_WLOCK_ASSERT(tp->t_inpcb); 4892 rack = (struct tcp_rack *)tp->t_fb_ptr; 4893 /* only alert CC if we alerted when we entered */ 4894 if (CC_ALGO(tp)->post_recovery != NULL) { 4895 tp->ccv->curack = th_ack; 4896 CC_ALGO(tp)->post_recovery(tp->ccv); 4897 if (tp->snd_cwnd < tp->snd_ssthresh) { 4898 /* 4899 * Rack has burst control and pacing 4900 * so let's not set this any lower than 4901 * snd_ssthresh per RFC-6582 (option 2).
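 * (RFC 6582 gives two choices for cwnd when recovery completes; option
 * (2) is simply cwnd = ssthresh, which is what is enforced as a floor
 * here since pacing and burst control already limit how fast that
 * window can be used.)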
4902 */ 4903 tp->snd_cwnd = tp->snd_ssthresh; 4904 } 4905 } 4906 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4907 union tcp_log_stackspecific log; 4908 struct timeval tv; 4909 4910 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4911 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4912 log.u_bbr.flex1 = th_ack; 4913 log.u_bbr.flex2 = tp->ccv->flags; 4914 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4915 log.u_bbr.flex4 = tp->ccv->nsegs; 4916 log.u_bbr.flex5 = V_tcp_abc_l_var; 4917 log.u_bbr.flex6 = orig_cwnd; 4918 log.u_bbr.flex7 = V_tcp_do_newsack; 4919 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4920 log.u_bbr.flex8 = 2; 4921 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4922 0, &log, false, NULL, NULL, 0, &tv); 4923 } 4924 if ((rack->rack_no_prr == 0) && 4925 (rack->no_prr_addback == 0) && 4926 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4927 /* 4928 * Suck the next prr cnt back into cwnd, but 4929 * only do that if we are not application limited. 4930 */ 4931 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4932 /* 4933 * We are allowed to add back to the cwnd the amount we did 4934 * not get out if: 4935 * a) no_prr_addback is off. 4936 * b) we are not app limited 4937 * c) we are doing prr 4938 * <and> 4939 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4940 */ 4941 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4942 rack->r_ctl.rc_prr_sndcnt); 4943 } 4944 rack->r_ctl.rc_prr_sndcnt = 0; 4945 rack_log_to_prr(rack, 1, 0); 4946 } 4947 rack_log_to_prr(rack, 14, orig_cwnd); 4948 tp->snd_recover = tp->snd_una; 4949 if (rack->r_ctl.dsack_persist) { 4950 rack->r_ctl.dsack_persist--; 4951 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4952 rack->r_ctl.num_dsack = 0; 4953 } 4954 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4955 } 4956 EXIT_RECOVERY(tp->t_flags); 4957 } 4958 4959 static void 4960 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack) 4961 { 4962 struct tcp_rack *rack; 4963 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4964 4965 INP_WLOCK_ASSERT(tp->t_inpcb); 4966 #ifdef STATS 4967 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4968 #endif 4969 if (IN_RECOVERY(tp->t_flags) == 0) { 4970 in_rec_at_entry = 0; 4971 ssthresh_enter = tp->snd_ssthresh; 4972 cwnd_enter = tp->snd_cwnd; 4973 } else 4974 in_rec_at_entry = 1; 4975 rack = (struct tcp_rack *)tp->t_fb_ptr; 4976 switch (type) { 4977 case CC_NDUPACK: 4978 tp->t_flags &= ~TF_WASFRECOVERY; 4979 tp->t_flags &= ~TF_WASCRECOVERY; 4980 if (!IN_FASTRECOVERY(tp->t_flags)) { 4981 rack->r_ctl.rc_prr_delivered = 0; 4982 rack->r_ctl.rc_prr_out = 0; 4983 if (rack->rack_no_prr == 0) { 4984 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4985 rack_log_to_prr(rack, 2, in_rec_at_entry); 4986 } 4987 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4988 tp->snd_recover = tp->snd_max; 4989 if (tp->t_flags2 & TF2_ECN_PERMIT) 4990 tp->t_flags2 |= TF2_ECN_SND_CWR; 4991 } 4992 break; 4993 case CC_ECN: 4994 if (!IN_CONGRECOVERY(tp->t_flags) || 4995 /* 4996 * Allow ECN reaction on ACK to CWR, if 4997 * that data segment was also CE marked. 
4998 */ 4999 SEQ_GEQ(ack, tp->snd_recover)) { 5000 EXIT_CONGRECOVERY(tp->t_flags); 5001 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 5002 tp->snd_recover = tp->snd_max + 1; 5003 if (tp->t_flags2 & TF2_ECN_PERMIT) 5004 tp->t_flags2 |= TF2_ECN_SND_CWR; 5005 } 5006 break; 5007 case CC_RTO: 5008 tp->t_dupacks = 0; 5009 tp->t_bytes_acked = 0; 5010 EXIT_RECOVERY(tp->t_flags); 5011 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 5012 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 5013 orig_cwnd = tp->snd_cwnd; 5014 tp->snd_cwnd = ctf_fixed_maxseg(tp); 5015 rack_log_to_prr(rack, 16, orig_cwnd); 5016 if (tp->t_flags2 & TF2_ECN_PERMIT) 5017 tp->t_flags2 |= TF2_ECN_SND_CWR; 5018 break; 5019 case CC_RTO_ERR: 5020 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 5021 /* RTO was unnecessary, so reset everything. */ 5022 tp->snd_cwnd = tp->snd_cwnd_prev; 5023 tp->snd_ssthresh = tp->snd_ssthresh_prev; 5024 tp->snd_recover = tp->snd_recover_prev; 5025 if (tp->t_flags & TF_WASFRECOVERY) { 5026 ENTER_FASTRECOVERY(tp->t_flags); 5027 tp->t_flags &= ~TF_WASFRECOVERY; 5028 } 5029 if (tp->t_flags & TF_WASCRECOVERY) { 5030 ENTER_CONGRECOVERY(tp->t_flags); 5031 tp->t_flags &= ~TF_WASCRECOVERY; 5032 } 5033 tp->snd_nxt = tp->snd_max; 5034 tp->t_badrxtwin = 0; 5035 break; 5036 } 5037 if ((CC_ALGO(tp)->cong_signal != NULL) && 5038 (type != CC_RTO)){ 5039 tp->ccv->curack = ack; 5040 CC_ALGO(tp)->cong_signal(tp->ccv, type); 5041 } 5042 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5043 rack_log_to_prr(rack, 15, cwnd_enter); 5044 rack->r_ctl.dsack_byte_cnt = 0; 5045 rack->r_ctl.retran_during_recovery = 0; 5046 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5047 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5048 rack->r_ent_rec_ns = 1; 5049 } 5050 } 5051 5052 static inline void 5053 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5054 { 5055 uint32_t i_cwnd; 5056 5057 INP_WLOCK_ASSERT(tp->t_inpcb); 5058 5059 #ifdef NETFLIX_STATS 5060 KMOD_TCPSTAT_INC(tcps_idle_restarts); 5061 if (tp->t_state == TCPS_ESTABLISHED) 5062 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 5063 #endif 5064 if (CC_ALGO(tp)->after_idle != NULL) 5065 CC_ALGO(tp)->after_idle(tp->ccv); 5066 5067 if (tp->snd_cwnd == 1) 5068 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5069 else 5070 i_cwnd = rc_init_window(rack); 5071 5072 /* 5073 * Being idle is no different than the initial window. If the cc 5074 * clamps it down below the initial window raise it to the initial 5075 * window. 5076 */ 5077 if (tp->snd_cwnd < i_cwnd) { 5078 tp->snd_cwnd = i_cwnd; 5079 } 5080 } 5081 5082 /* 5083 * Indicate whether this ack should be delayed. We can delay the ack if 5084 * following conditions are met: 5085 * - There is no delayed ack timer in progress. 5086 * - Our last ack wasn't a 0-sized window. We never want to delay 5087 * the ack that opens up a 0-sized window. 5088 * - LRO wasn't used for this segment. We make sure by checking that the 5089 * segment size is not larger than the MSS. 5090 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5091 * connection. 5092 */ 5093 #define DELAY_ACK(tp, tlen) \ 5094 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5095 ((tp->t_flags & TF_DELACK) == 0) && \ 5096 (tlen <= tp->t_maxseg) && \ 5097 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5098 5099 static struct rack_sendmap * 5100 rack_find_lowest_rsm(struct tcp_rack *rack) 5101 { 5102 struct rack_sendmap *rsm; 5103 5104 /* 5105 * Walk the time-order transmitted list looking for an rsm that is 5106 * not acked. 
This will be the one that was sent the longest time 5107 * ago that is still outstanding. 5108 */ 5109 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 5110 if (rsm->r_flags & RACK_ACKED) { 5111 continue; 5112 } 5113 goto finish; 5114 } 5115 finish: 5116 return (rsm); 5117 } 5118 5119 static struct rack_sendmap * 5120 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 5121 { 5122 struct rack_sendmap *prsm; 5123 5124 /* 5125 * Walk the sequence-ordered list backward until we arrive at 5126 * the highest seq not acked. In theory when this is called it 5127 * should be the last segment (which it was not). 5128 */ 5129 counter_u64_add(rack_find_high, 1); 5130 prsm = rsm; 5131 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 5132 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 5133 continue; 5134 } 5135 return (prsm); 5136 } 5137 return (NULL); 5138 } 5139 5140 static uint32_t 5141 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 5142 { 5143 int32_t lro; 5144 uint32_t thresh; 5145 5146 /* 5147 * lro is the flag we use to determine if we have seen reordering. 5148 * If it gets set we have seen reordering. The reorder logic either 5149 * works in one of two ways: 5150 * 5151 * If reorder-fade is configured, then we track the last time we saw 5152 * re-ordering occur. If we reach the point where enough time has 5153 * passed we no longer consider reordering to be occurring. 5154 * 5155 * Or if reorder-fade is 0, then once we see reordering we consider 5156 * the connection to always be subject to reordering and just set lro 5157 * to 1. 5158 * 5159 * In the end if lro is non-zero we add the extra time for 5160 * reordering in. 5161 */ 5162 if (srtt == 0) 5163 srtt = 1; 5164 if (rack->r_ctl.rc_reorder_ts) { 5165 if (rack->r_ctl.rc_reorder_fade) { 5166 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 5167 lro = cts - rack->r_ctl.rc_reorder_ts; 5168 if (lro == 0) { 5169 /* 5170 * No time has passed since the last 5171 * reorder, mark it as reordering. 5172 */ 5173 lro = 1; 5174 } 5175 } else { 5176 /* Negative time? */ 5177 lro = 0; 5178 } 5179 if (lro > rack->r_ctl.rc_reorder_fade) { 5180 /* Turn off reordering seen too */ 5181 rack->r_ctl.rc_reorder_ts = 0; 5182 lro = 0; 5183 } 5184 } else { 5185 /* Reordering does not fade */ 5186 lro = 1; 5187 } 5188 } else { 5189 lro = 0; 5190 } 5191 if (rack->rc_rack_tmr_std_based == 0) { 5192 thresh = srtt + rack->r_ctl.rc_pkt_delay; 5193 } else { 5194 /* Standards based pkt-delay is 1/4 srtt */ 5195 thresh = srtt + (srtt >> 2); 5196 } 5197 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 5198 /* It must be set, if not you get 1/4 rtt */ 5199 if (rack->r_ctl.rc_reorder_shift) 5200 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 5201 else 5202 thresh += (srtt >> 2); 5203 } 5204 if (rack->rc_rack_use_dsack && 5205 lro && 5206 (rack->r_ctl.num_dsack > 0)) { 5207 /* 5208 * We only increase the reordering window if we 5209 * have seen reordering <and> we have a DSACK count.
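 * As a rough illustrative calculation (values are made up): in the
 * non-standards-based path with srtt = 40000 usec, rc_pkt_delay = 0,
 * reordering seen, rc_reorder_shift = 2 and num_dsack = 2, we get
 * thresh = 40000 + (40000 >> 2) + 2 * (40000 >> 2) = 70000 usec,
 * which is then still subject to the 2 * srtt ceiling (80000 here)
 * and the rack_rto_max cap below.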
5210 */ 5211 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5212 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5213 } 5214 /* SRTT * 2 is the ceiling */ 5215 if (thresh > (srtt * 2)) { 5216 thresh = srtt * 2; 5217 } 5218 /* And we don't want it above the RTO max either */ 5219 if (thresh > rack_rto_max) { 5220 thresh = rack_rto_max; 5221 } 5222 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5223 return (thresh); 5224 } 5225 5226 static uint32_t 5227 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5228 struct rack_sendmap *rsm, uint32_t srtt) 5229 { 5230 struct rack_sendmap *prsm; 5231 uint32_t thresh, len; 5232 int segsiz; 5233 5234 if (srtt == 0) 5235 srtt = 1; 5236 if (rack->r_ctl.rc_tlp_threshold) 5237 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5238 else 5239 thresh = (srtt * 2); 5240 5241 /* Get the previous sent packet, if any */ 5242 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5243 counter_u64_add(rack_enter_tlp_calc, 1); 5244 len = rsm->r_end - rsm->r_start; 5245 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5246 /* Exactly like the ID */ 5247 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5248 uint32_t alt_thresh; 5249 /* 5250 * Compensate for delayed-ack with the d-ack time. 5251 */ 5252 counter_u64_add(rack_used_tlpmethod, 1); 5253 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5254 if (alt_thresh > thresh) 5255 thresh = alt_thresh; 5256 } 5257 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5258 /* 2.1 behavior */ 5259 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5260 if (prsm && (len <= segsiz)) { 5261 /* 5262 * Two packets outstanding, thresh should be (2*srtt) + 5263 * possible inter-packet delay (if any). 5264 */ 5265 uint32_t inter_gap = 0; 5266 int idx, nidx; 5267 5268 counter_u64_add(rack_used_tlpmethod, 1); 5269 idx = rsm->r_rtr_cnt - 1; 5270 nidx = prsm->r_rtr_cnt - 1; 5271 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5272 /* Yes it was sent later (or at the same time) */ 5273 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5274 } 5275 thresh += inter_gap; 5276 } else if (len <= segsiz) { 5277 /* 5278 * Possibly compensate for delayed-ack. 5279 */ 5280 uint32_t alt_thresh; 5281 5282 counter_u64_add(rack_used_tlpmethod2, 1); 5283 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5284 if (alt_thresh > thresh) 5285 thresh = alt_thresh; 5286 } 5287 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5288 /* 2.2 behavior */ 5289 if (len <= segsiz) { 5290 uint32_t alt_thresh; 5291 /* 5292 * Compensate for delayed-ack with the d-ack time. 5293 */ 5294 counter_u64_add(rack_used_tlpmethod, 1); 5295 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5296 if (alt_thresh > thresh) 5297 thresh = alt_thresh; 5298 } 5299 } 5300 /* Not above an RTO */ 5301 if (thresh > tp->t_rxtcur) { 5302 thresh = tp->t_rxtcur; 5303 } 5304 /* Not above a RTO max */ 5305 if (thresh > rack_rto_max) { 5306 thresh = rack_rto_max; 5307 } 5308 /* Apply user supplied min TLP */ 5309 if (thresh < rack_tlp_min) { 5310 thresh = rack_tlp_min; 5311 } 5312 return (thresh); 5313 } 5314 5315 static uint32_t 5316 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5317 { 5318 /* 5319 * We want the rack_rtt which is the 5320 * last rtt we measured. 
However if that 5321 * does not exist we fallback to the srtt (which 5322 * we probably will never do) and then as a last 5323 * resort we use RACK_INITIAL_RTO if no srtt is 5324 * yet set. 5325 */ 5326 if (rack->rc_rack_rtt) 5327 return (rack->rc_rack_rtt); 5328 else if (tp->t_srtt == 0) 5329 return (RACK_INITIAL_RTO); 5330 return (tp->t_srtt); 5331 } 5332 5333 static struct rack_sendmap * 5334 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5335 { 5336 /* 5337 * Check to see that we don't need to fall into recovery. We will 5338 * need to do so if our oldest transmit is past the time we should 5339 * have had an ack. 5340 */ 5341 struct tcp_rack *rack; 5342 struct rack_sendmap *rsm; 5343 int32_t idx; 5344 uint32_t srtt, thresh; 5345 5346 rack = (struct tcp_rack *)tp->t_fb_ptr; 5347 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5348 return (NULL); 5349 } 5350 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5351 if (rsm == NULL) 5352 return (NULL); 5353 5354 if (rsm->r_flags & RACK_ACKED) { 5355 rsm = rack_find_lowest_rsm(rack); 5356 if (rsm == NULL) 5357 return (NULL); 5358 } 5359 idx = rsm->r_rtr_cnt - 1; 5360 srtt = rack_grab_rtt(tp, rack); 5361 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5362 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5363 return (NULL); 5364 } 5365 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5366 return (NULL); 5367 } 5368 /* Ok if we reach here we are over-due and this guy can be sent */ 5369 if (IN_RECOVERY(tp->t_flags) == 0) { 5370 /* 5371 * For the one that enters us into recovery record undo 5372 * info. 5373 */ 5374 rack->r_ctl.rc_rsm_start = rsm->r_start; 5375 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 5376 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 5377 } 5378 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 5379 return (rsm); 5380 } 5381 5382 static uint32_t 5383 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5384 { 5385 int32_t t; 5386 int32_t tt; 5387 uint32_t ret_val; 5388 5389 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5390 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5391 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5392 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5393 ret_val = (uint32_t)tt; 5394 return (ret_val); 5395 } 5396 5397 static uint32_t 5398 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5399 { 5400 /* 5401 * Start the FR timer, we do this based on getting the first one in 5402 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5403 * events we need to stop the running timer (if its running) before 5404 * starting the new one. 5405 */ 5406 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5407 uint32_t srtt_cur; 5408 int32_t idx; 5409 int32_t is_tlp_timer = 0; 5410 struct rack_sendmap *rsm; 5411 5412 if (rack->t_timers_stopped) { 5413 /* All timers have been stopped none are to run */ 5414 return (0); 5415 } 5416 if (rack->rc_in_persist) { 5417 /* We can't start any timer in persists */ 5418 return (rack_get_persists_timer_val(tp, rack)); 5419 } 5420 rack->rc_on_min_to = 0; 5421 if ((tp->t_state < TCPS_ESTABLISHED) || 5422 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5423 goto activate_rxt; 5424 } 5425 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5426 if ((rsm == NULL) || sup_rack) { 5427 /* Nothing on the send map or no rack */ 5428 activate_rxt: 5429 time_since_sent = 0; 5430 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5431 if (rsm) { 5432 /* 5433 * Should we discount the RTX timer any? 
5434 * 5435 * We want to discount it the smallest amount. 5436 * If a timer (Rack/TLP or RXT) has gone off more 5437 * recently thats the discount we want to use (now - timer time). 5438 * If the retransmit of the oldest packet was more recent then 5439 * we want to use that (now - oldest-packet-last_transmit_time). 5440 * 5441 */ 5442 idx = rsm->r_rtr_cnt - 1; 5443 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5444 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5445 else 5446 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5447 if (TSTMP_GT(cts, tstmp_touse)) 5448 time_since_sent = cts - tstmp_touse; 5449 } 5450 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5451 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5452 to = tp->t_rxtcur; 5453 if (to > time_since_sent) 5454 to -= time_since_sent; 5455 else 5456 to = rack->r_ctl.rc_min_to; 5457 if (to == 0) 5458 to = 1; 5459 /* Special case for KEEPINIT */ 5460 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5461 (TP_KEEPINIT(tp) != 0) && 5462 rsm) { 5463 /* 5464 * We have to put a ceiling on the rxt timer 5465 * of the keep-init timeout. 5466 */ 5467 uint32_t max_time, red; 5468 5469 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5470 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5471 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5472 if (red < max_time) 5473 max_time -= red; 5474 else 5475 max_time = 1; 5476 } 5477 /* Reduce timeout to the keep value if needed */ 5478 if (max_time < to) 5479 to = max_time; 5480 } 5481 return (to); 5482 } 5483 return (0); 5484 } 5485 if (rsm->r_flags & RACK_ACKED) { 5486 rsm = rack_find_lowest_rsm(rack); 5487 if (rsm == NULL) { 5488 /* No lowest? */ 5489 goto activate_rxt; 5490 } 5491 } 5492 if (rack->sack_attack_disable) { 5493 /* 5494 * We don't want to do 5495 * any TLP's if you are an attacker. 5496 * Though if you are doing what 5497 * is expected you may still have 5498 * SACK-PASSED marks. 5499 */ 5500 goto activate_rxt; 5501 } 5502 /* Convert from ms to usecs */ 5503 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5504 if ((tp->t_flags & TF_SENTFIN) && 5505 ((tp->snd_max - tp->snd_una) == 1) && 5506 (rsm->r_flags & RACK_HAS_FIN)) { 5507 /* 5508 * We don't start a rack timer if all we have is a 5509 * FIN outstanding. 5510 */ 5511 goto activate_rxt; 5512 } 5513 if ((rack->use_rack_rr == 0) && 5514 (IN_FASTRECOVERY(tp->t_flags)) && 5515 (rack->rack_no_prr == 0) && 5516 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5517 /* 5518 * We are not cheating, in recovery and 5519 * not enough ack's to yet get our next 5520 * retransmission out. 5521 * 5522 * Note that classified attackers do not 5523 * get to use the rack-cheat. 
5524 */ 5525 goto activate_tlp; 5526 } 5527 srtt = rack_grab_rtt(tp, rack); 5528 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5529 idx = rsm->r_rtr_cnt - 1; 5530 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5531 if (SEQ_GEQ(exp, cts)) { 5532 to = exp - cts; 5533 if (to < rack->r_ctl.rc_min_to) { 5534 to = rack->r_ctl.rc_min_to; 5535 if (rack->r_rr_config == 3) 5536 rack->rc_on_min_to = 1; 5537 } 5538 } else { 5539 to = rack->r_ctl.rc_min_to; 5540 if (rack->r_rr_config == 3) 5541 rack->rc_on_min_to = 1; 5542 } 5543 } else { 5544 /* Ok we need to do a TLP not RACK */ 5545 activate_tlp: 5546 if ((rack->rc_tlp_in_progress != 0) && 5547 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5548 /* 5549 * The previous send was a TLP and we have sent 5550 * N TLP's without sending new data. 5551 */ 5552 goto activate_rxt; 5553 } 5554 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5555 if (rsm == NULL) { 5556 /* We found no rsm to TLP with. */ 5557 goto activate_rxt; 5558 } 5559 if (rsm->r_flags & RACK_HAS_FIN) { 5560 /* If its a FIN we dont do TLP */ 5561 rsm = NULL; 5562 goto activate_rxt; 5563 } 5564 idx = rsm->r_rtr_cnt - 1; 5565 time_since_sent = 0; 5566 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5567 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5568 else 5569 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5570 if (TSTMP_GT(cts, tstmp_touse)) 5571 time_since_sent = cts - tstmp_touse; 5572 is_tlp_timer = 1; 5573 if (tp->t_srtt) { 5574 if ((rack->rc_srtt_measure_made == 0) && 5575 (tp->t_srtt == 1)) { 5576 /* 5577 * If another stack as run and set srtt to 1, 5578 * then the srtt was 0, so lets use the initial. 5579 */ 5580 srtt = RACK_INITIAL_RTO; 5581 } else { 5582 srtt_cur = tp->t_srtt; 5583 srtt = srtt_cur; 5584 } 5585 } else 5586 srtt = RACK_INITIAL_RTO; 5587 /* 5588 * If the SRTT is not keeping up and the 5589 * rack RTT has spiked we want to use 5590 * the last RTT not the smoothed one. 5591 */ 5592 if (rack_tlp_use_greater && 5593 tp->t_srtt && 5594 (srtt < rack_grab_rtt(tp, rack))) { 5595 srtt = rack_grab_rtt(tp, rack); 5596 } 5597 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5598 if (thresh > time_since_sent) { 5599 to = thresh - time_since_sent; 5600 } else { 5601 to = rack->r_ctl.rc_min_to; 5602 rack_log_alt_to_to_cancel(rack, 5603 thresh, /* flex1 */ 5604 time_since_sent, /* flex2 */ 5605 tstmp_touse, /* flex3 */ 5606 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5607 (uint32_t)rsm->r_tim_lastsent[idx], 5608 srtt, 5609 idx, 99); 5610 } 5611 if (to < rack_tlp_min) { 5612 to = rack_tlp_min; 5613 } 5614 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5615 /* 5616 * If the TLP time works out to larger than the max 5617 * RTO lets not do TLP.. just RTO. 5618 */ 5619 goto activate_rxt; 5620 } 5621 } 5622 if (is_tlp_timer == 0) { 5623 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5624 } else { 5625 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5626 } 5627 if (to == 0) 5628 to = 1; 5629 return (to); 5630 } 5631 5632 static void 5633 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5634 { 5635 if (rack->rc_in_persist == 0) { 5636 if (tp->t_flags & TF_GPUTINPROG) { 5637 /* 5638 * Stop the goodput now, the calling of the 5639 * measurement function clears the flag. 
5640 */ 5641 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5642 RACK_QUALITY_PERSIST); 5643 } 5644 #ifdef NETFLIX_SHARED_CWND 5645 if (rack->r_ctl.rc_scw) { 5646 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5647 rack->rack_scwnd_is_idle = 1; 5648 } 5649 #endif 5650 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5651 if (rack->r_ctl.rc_went_idle_time == 0) 5652 rack->r_ctl.rc_went_idle_time = 1; 5653 rack_timer_cancel(tp, rack, cts, __LINE__); 5654 rack->r_ctl.persist_lost_ends = 0; 5655 rack->probe_not_answered = 0; 5656 rack->forced_ack = 0; 5657 tp->t_rxtshift = 0; 5658 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5659 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5660 rack->rc_in_persist = 1; 5661 } 5662 } 5663 5664 static void 5665 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5666 { 5667 if (tcp_in_hpts(rack->rc_inp)) { 5668 tcp_hpts_remove(rack->rc_inp); 5669 rack->r_ctl.rc_hpts_flags = 0; 5670 } 5671 #ifdef NETFLIX_SHARED_CWND 5672 if (rack->r_ctl.rc_scw) { 5673 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5674 rack->rack_scwnd_is_idle = 0; 5675 } 5676 #endif 5677 if (rack->rc_gp_dyn_mul && 5678 (rack->use_fixed_rate == 0) && 5679 (rack->rc_always_pace)) { 5680 /* 5681 * Do we count this as if a probe-rtt just 5682 * finished? 5683 */ 5684 uint32_t time_idle, idle_min; 5685 5686 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5687 idle_min = rack_min_probertt_hold; 5688 if (rack_probertt_gpsrtt_cnt_div) { 5689 uint64_t extra; 5690 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5691 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5692 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5693 idle_min += (uint32_t)extra; 5694 } 5695 if (time_idle >= idle_min) { 5696 /* Yes, we count it as a probe-rtt. 
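 * That is, the connection sat idle for at least as long as a
 * probe-rtt would have held us down, so either reset the probe-rtt
 * bookkeeping below as if one had just completed, or, if we were
 * already in probe-rtt, simply exit it.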
*/ 5697 uint32_t us_cts; 5698 5699 us_cts = tcp_get_usecs(NULL); 5700 if (rack->in_probe_rtt == 0) { 5701 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5702 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5703 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5704 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5705 } else { 5706 rack_exit_probertt(rack, us_cts); 5707 } 5708 } 5709 } 5710 rack->rc_in_persist = 0; 5711 rack->r_ctl.rc_went_idle_time = 0; 5712 tp->t_rxtshift = 0; 5713 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5714 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5715 rack->r_ctl.rc_agg_delayed = 0; 5716 rack->r_early = 0; 5717 rack->r_late = 0; 5718 rack->r_ctl.rc_agg_early = 0; 5719 } 5720 5721 static void 5722 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5723 struct hpts_diag *diag, struct timeval *tv) 5724 { 5725 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5726 union tcp_log_stackspecific log; 5727 5728 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5729 log.u_bbr.flex1 = diag->p_nxt_slot; 5730 log.u_bbr.flex2 = diag->p_cur_slot; 5731 log.u_bbr.flex3 = diag->slot_req; 5732 log.u_bbr.flex4 = diag->inp_hptsslot; 5733 log.u_bbr.flex5 = diag->slot_remaining; 5734 log.u_bbr.flex6 = diag->need_new_to; 5735 log.u_bbr.flex7 = diag->p_hpts_active; 5736 log.u_bbr.flex8 = diag->p_on_min_sleep; 5737 /* Hijack other fields as needed */ 5738 log.u_bbr.epoch = diag->have_slept; 5739 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5740 log.u_bbr.pkts_out = diag->co_ret; 5741 log.u_bbr.applimited = diag->hpts_sleep_time; 5742 log.u_bbr.delivered = diag->p_prev_slot; 5743 log.u_bbr.inflight = diag->p_runningslot; 5744 log.u_bbr.bw_inuse = diag->wheel_slot; 5745 log.u_bbr.rttProp = diag->wheel_cts; 5746 log.u_bbr.timeStamp = cts; 5747 log.u_bbr.delRate = diag->maxslots; 5748 log.u_bbr.cur_del_rate = diag->p_curtick; 5749 log.u_bbr.cur_del_rate <<= 32; 5750 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5751 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5752 &rack->rc_inp->inp_socket->so_rcv, 5753 &rack->rc_inp->inp_socket->so_snd, 5754 BBR_LOG_HPTSDIAG, 0, 5755 0, &log, false, tv); 5756 } 5757 5758 } 5759 5760 static void 5761 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5762 { 5763 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5764 union tcp_log_stackspecific log; 5765 struct timeval tv; 5766 5767 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5768 log.u_bbr.flex1 = sb->sb_flags; 5769 log.u_bbr.flex2 = len; 5770 log.u_bbr.flex3 = sb->sb_state; 5771 log.u_bbr.flex8 = type; 5772 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5773 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5774 &rack->rc_inp->inp_socket->so_rcv, 5775 &rack->rc_inp->inp_socket->so_snd, 5776 TCP_LOG_SB_WAKE, 0, 5777 len, &log, false, &tv); 5778 } 5779 } 5780 5781 static void 5782 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5783 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5784 { 5785 struct hpts_diag diag; 5786 struct inpcb *inp; 5787 struct timeval tv; 5788 uint32_t delayed_ack = 0; 5789 uint32_t hpts_timeout; 5790 uint32_t entry_slot = slot; 5791 uint8_t stopped; 5792 uint32_t left = 0; 5793 uint32_t us_cts; 5794 5795 inp = tp->t_inpcb; 5796 if ((tp->t_state == TCPS_CLOSED) || 5797 (tp->t_state == TCPS_LISTEN)) { 5798 return; 5799 } 5800 if (tcp_in_hpts(inp)) { 5801 /* Already on the pacer */ 5802 return; 5803 } 5804 stopped 
= rack->rc_tmr_stopped; 5805 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5806 left = rack->r_ctl.rc_timer_exp - cts; 5807 } 5808 rack->r_ctl.rc_timer_exp = 0; 5809 rack->r_ctl.rc_hpts_flags = 0; 5810 us_cts = tcp_get_usecs(&tv); 5811 /* Now early/late accounting */ 5812 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5813 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5814 /* 5815 * We have a early carry over set, 5816 * we can always add more time so we 5817 * can always make this compensation. 5818 * 5819 * Note if ack's are allowed to wake us do not 5820 * penalize the next timer for being awoke 5821 * by an ack aka the rc_agg_early (non-paced mode). 5822 */ 5823 slot += rack->r_ctl.rc_agg_early; 5824 rack->r_early = 0; 5825 rack->r_ctl.rc_agg_early = 0; 5826 } 5827 if (rack->r_late) { 5828 /* 5829 * This is harder, we can 5830 * compensate some but it 5831 * really depends on what 5832 * the current pacing time is. 5833 */ 5834 if (rack->r_ctl.rc_agg_delayed >= slot) { 5835 /* 5836 * We can't compensate for it all. 5837 * And we have to have some time 5838 * on the clock. We always have a min 5839 * 10 slots (10 x 10 i.e. 100 usecs). 5840 */ 5841 if (slot <= HPTS_TICKS_PER_SLOT) { 5842 /* We gain delay */ 5843 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5844 slot = HPTS_TICKS_PER_SLOT; 5845 } else { 5846 /* We take off some */ 5847 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5848 slot = HPTS_TICKS_PER_SLOT; 5849 } 5850 } else { 5851 slot -= rack->r_ctl.rc_agg_delayed; 5852 rack->r_ctl.rc_agg_delayed = 0; 5853 /* Make sure we have 100 useconds at minimum */ 5854 if (slot < HPTS_TICKS_PER_SLOT) { 5855 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5856 slot = HPTS_TICKS_PER_SLOT; 5857 } 5858 if (rack->r_ctl.rc_agg_delayed == 0) 5859 rack->r_late = 0; 5860 } 5861 } 5862 if (slot) { 5863 /* We are pacing too */ 5864 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5865 } 5866 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5867 #ifdef NETFLIX_EXP_DETECTION 5868 if (rack->sack_attack_disable && 5869 (slot < tcp_sad_pacing_interval)) { 5870 /* 5871 * We have a potential attacker on 5872 * the line. We have possibly some 5873 * (or now) pacing time set. We want to 5874 * slow down the processing of sacks by some 5875 * amount (if it is an attacker). Set the default 5876 * slot for attackers in place (unless the orginal 5877 * interval is longer). Its stored in 5878 * micro-seconds, so lets convert to msecs. 5879 */ 5880 slot = tcp_sad_pacing_interval; 5881 } 5882 #endif 5883 if (tp->t_flags & TF_DELACK) { 5884 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5885 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5886 } 5887 if (delayed_ack && ((hpts_timeout == 0) || 5888 (delayed_ack < hpts_timeout))) 5889 hpts_timeout = delayed_ack; 5890 else 5891 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5892 /* 5893 * If no timers are going to run and we will fall off the hptsi 5894 * wheel, we resort to a keep-alive timer if its configured. 5895 */ 5896 if ((hpts_timeout == 0) && 5897 (slot == 0)) { 5898 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5899 (tp->t_state <= TCPS_CLOSING)) { 5900 /* 5901 * Ok we have no timer (persists, rack, tlp, rxt or 5902 * del-ack), we don't have segments being paced. So 5903 * all that is left is the keepalive timer. 
5904 */ 5905 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5906 /* Get the established keep-alive time */ 5907 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5908 } else { 5909 /* 5910 * Get the initial setup keep-alive time, 5911 * note that this is probably not going to 5912 * happen, since rack will be running a rxt timer 5913 * if a SYN of some sort is outstanding. It is 5914 * actually handled in rack_timeout_rxt(). 5915 */ 5916 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5917 } 5918 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5919 if (rack->in_probe_rtt) { 5920 /* 5921 * We want to instead not wake up a long time from 5922 * now but to wake up about the time we would 5923 * exit probe-rtt and initiate a keep-alive ack. 5924 * This will get us out of probe-rtt and update 5925 * our min-rtt. 5926 */ 5927 hpts_timeout = rack_min_probertt_hold; 5928 } 5929 } 5930 } 5931 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5932 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5933 /* 5934 * RACK, TLP, persists and RXT timers all are restartable 5935 * based on actions input .. i.e we received a packet (ack 5936 * or sack) and that changes things (rw, or snd_una etc). 5937 * Thus we can restart them with a new value. For 5938 * keep-alive, delayed_ack we keep track of what was left 5939 * and restart the timer with a smaller value. 5940 */ 5941 if (left < hpts_timeout) 5942 hpts_timeout = left; 5943 } 5944 if (hpts_timeout) { 5945 /* 5946 * Hack alert for now we can't time-out over 2,147,483 5947 * seconds (a bit more than 596 hours), which is probably ok 5948 * :). 5949 */ 5950 if (hpts_timeout > 0x7ffffffe) 5951 hpts_timeout = 0x7ffffffe; 5952 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5953 } 5954 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5955 if ((rack->gp_ready == 0) && 5956 (rack->use_fixed_rate == 0) && 5957 (hpts_timeout < slot) && 5958 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5959 /* 5960 * We have no good estimate yet for the 5961 * old clunky burst mitigation or the 5962 * real pacing. And the tlp or rxt is smaller 5963 * than the pacing calculation. Lets not 5964 * pace that long since we know the calculation 5965 * so far is not accurate. 5966 */ 5967 slot = hpts_timeout; 5968 } 5969 rack->r_ctl.last_pacing_time = slot; 5970 /** 5971 * Turn off all the flags for queuing by default. The 5972 * flags have important meanings to what happens when 5973 * LRO interacts with the transport. Most likely (by default now) 5974 * mbuf_queueing and ack compression are on. So the transport 5975 * has a couple of flags that control what happens (if those 5976 * are not on then these flags won't have any effect since it 5977 * won't go through the queuing LRO path). 5978 * 5979 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5980 * pacing output, so don't disturb. But 5981 * it also means LRO can wake me if there 5982 * is a SACK arrival. 5983 * 5984 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5985 * with the above flag (QUEUE_READY) and 5986 * when present it says don't even wake me 5987 * if a SACK arrives. 5988 * 5989 * The idea behind these flags is that if we are pacing we 5990 * set the MBUF_QUEUE_READY and only get woken up if 5991 * a SACK arrives (which could change things) or if 5992 * our pacing timer expires. If, however, we have a rack 5993 * timer running, then we don't even want a sack to wake 5994 * us since the rack timer has to expire before we can send. 
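 * So, as arranged below: a pacing timer alone leaves only
 * INP_MBUF_QUEUE_READY set; a pacing timer plus a running Rack timer
 * (unless r_rr_config is 3) also sets INP_DONT_SACK_QUEUE; and when
 * acks are allowed to send out data (rc_ack_can_sendout_data) both
 * flags are cleared again so anything can wake us.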
5995 * 5996 * Other cases should usually have none of the flags set 5997 * so LRO can call into us. 5998 */ 5999 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 6000 if (slot) { 6001 rack->r_ctl.rc_last_output_to = us_cts + slot; 6002 /* 6003 * A pacing timer (slot) is being set, in 6004 * such a case we cannot send (we are blocked by 6005 * the timer). So lets tell LRO that it should not 6006 * wake us unless there is a SACK. Note this only 6007 * will be effective if mbuf queueing is on or 6008 * compressed acks are being processed. 6009 */ 6010 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 6011 /* 6012 * But wait if we have a Rack timer running 6013 * even a SACK should not disturb us (with 6014 * the exception of r_rr_config 3). 6015 */ 6016 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 6017 (rack->r_rr_config != 3)) 6018 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 6019 if (rack->rc_ack_can_sendout_data) { 6020 /* 6021 * Ahh but wait, this is that special case 6022 * where the pacing timer can be disturbed 6023 * backout the changes (used for non-paced 6024 * burst limiting). 6025 */ 6026 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 6027 } 6028 if ((rack->use_rack_rr) && 6029 (rack->r_rr_config < 2) && 6030 ((hpts_timeout) && (hpts_timeout < slot))) { 6031 /* 6032 * Arrange for the hpts to kick back in after the 6033 * t-o if the t-o does not cause a send. 6034 */ 6035 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 6036 __LINE__, &diag); 6037 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6038 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6039 } else { 6040 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 6041 __LINE__, &diag); 6042 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6043 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6044 } 6045 } else if (hpts_timeout) { 6046 /* 6047 * With respect to inp_flags2 here, lets let any new acks wake 6048 * us up here. Since we are not pacing (no pacing timer), output 6049 * can happen so we should let it. If its a Rack timer, then any inbound 6050 * packet probably won't change the sending (we will be blocked) 6051 * but it may change the prr stats so letting it in (the set defaults 6052 * at the start of this block) are good enough. 6053 */ 6054 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 6055 __LINE__, &diag); 6056 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6057 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6058 } else { 6059 /* No timer starting */ 6060 #ifdef INVARIANTS 6061 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6062 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6063 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6064 } 6065 #endif 6066 } 6067 rack->rc_tmr_stopped = 0; 6068 if (slot) 6069 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 6070 } 6071 6072 /* 6073 * RACK Timer, here we simply do logging and house keeping. 6074 * the normal rack_output() function will call the 6075 * appropriate thing to check if we need to do a RACK retransmit. 6076 * We return 1, saying don't proceed with rack_output only 6077 * when all timers have been stopped (destroyed PCB?). 6078 */ 6079 static int 6080 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6081 { 6082 /* 6083 * This timer simply provides an internal trigger to send out data. 
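 * (If a retransmission is needed the segment is handed to
 * rack_output() via r_ctl.rc_resend with r_timer_override set, rather
 * than being sent from timer context.)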
6084 * The check_recovery_mode call will see if there are needed 6085 * retransmissions, if so we will enter fast-recovery. The output 6086 * call may or may not do the same thing depending on sysctl 6087 * settings. 6088 */ 6089 struct rack_sendmap *rsm; 6090 6091 if (tp->t_timers->tt_flags & TT_STOPPED) { 6092 return (1); 6093 } 6094 counter_u64_add(rack_to_tot, 1); 6095 if (rack->r_state && (rack->r_state != tp->t_state)) 6096 rack_set_state(tp, rack); 6097 rack->rc_on_min_to = 0; 6098 rsm = rack_check_recovery_mode(tp, cts); 6099 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6100 if (rsm) { 6101 rack->r_ctl.rc_resend = rsm; 6102 rack->r_timer_override = 1; 6103 if (rack->use_rack_rr) { 6104 /* 6105 * Don't accumulate extra pacing delay 6106 * we are allowing the rack timer to 6107 * over-ride pacing i.e. rrr takes precedence 6108 * if the pacing interval is longer than the rrr 6109 * time (in other words we get the min pacing 6110 * time versus rrr pacing time). 6111 */ 6112 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6113 } 6114 } 6115 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6116 if (rsm == NULL) { 6117 /* restart a timer and return 1 */ 6118 rack_start_hpts_timer(rack, tp, cts, 6119 0, 0, 0); 6120 return (1); 6121 } 6122 return (0); 6123 } 6124 6125 static void 6126 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6127 { 6128 if (rsm->m->m_len > rsm->orig_m_len) { 6129 /* 6130 * Mbuf grew, caused by sbcompress, our offset does 6131 * not change. 6132 */ 6133 rsm->orig_m_len = rsm->m->m_len; 6134 } else if (rsm->m->m_len < rsm->orig_m_len) { 6135 /* 6136 * Mbuf shrank, trimmed off the top by an ack, our 6137 * offset changes. 6138 */ 6139 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6140 rsm->orig_m_len = rsm->m->m_len; 6141 } 6142 } 6143 6144 static void 6145 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6146 { 6147 struct mbuf *m; 6148 uint32_t soff; 6149 6150 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 6151 /* Fix up the orig_m_len and possibly the mbuf offset */ 6152 rack_adjust_orig_mlen(src_rsm); 6153 } 6154 m = src_rsm->m; 6155 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6156 while (soff >= m->m_len) { 6157 /* Move out past this mbuf */ 6158 soff -= m->m_len; 6159 m = m->m_next; 6160 KASSERT((m != NULL), 6161 ("rsm:%p nrsm:%p hit at soff:%u null m", 6162 src_rsm, rsm, soff)); 6163 } 6164 rsm->m = m; 6165 rsm->soff = soff; 6166 rsm->orig_m_len = m->m_len; 6167 } 6168 6169 static __inline void 6170 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 6171 struct rack_sendmap *rsm, uint32_t start) 6172 { 6173 int idx; 6174 6175 nrsm->r_start = start; 6176 nrsm->r_end = rsm->r_end; 6177 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 6178 nrsm->r_flags = rsm->r_flags; 6179 nrsm->r_dupack = rsm->r_dupack; 6180 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 6181 nrsm->r_rtr_bytes = 0; 6182 nrsm->r_fas = rsm->r_fas; 6183 rsm->r_end = nrsm->r_start; 6184 nrsm->r_just_ret = rsm->r_just_ret; 6185 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 6186 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 6187 } 6188 /* Now if we have SYN flag we keep it on the left edge */ 6189 if (nrsm->r_flags & RACK_HAS_SYN) 6190 nrsm->r_flags &= ~RACK_HAS_SYN; 6191 /* Now if we have a FIN flag we keep it on the right edge */ 6192 if (rsm->r_flags & RACK_HAS_FIN) 6193 rsm->r_flags &= ~RACK_HAS_FIN; 6194 /* Push bit must go to the right edge as well */ 6195 if (rsm->r_flags & RACK_HAD_PUSH) 6196 rsm->r_flags &= 
~RACK_HAD_PUSH; 6197 /* Clone over the state of the hw_tls flag */ 6198 nrsm->r_hw_tls = rsm->r_hw_tls; 6199 /* 6200 * Now we need to find nrsm's new location in the mbuf chain 6201 * we basically calculate a new offset, which is soff + 6202 * how much is left in original rsm. Then we walk out the mbuf 6203 * chain to find the righ postion, it may be the same mbuf 6204 * or maybe not. 6205 */ 6206 KASSERT(((rsm->m != NULL) || 6207 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 6208 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 6209 if (rsm->m) 6210 rack_setup_offset_for_rsm(rsm, nrsm); 6211 } 6212 6213 static struct rack_sendmap * 6214 rack_merge_rsm(struct tcp_rack *rack, 6215 struct rack_sendmap *l_rsm, 6216 struct rack_sendmap *r_rsm) 6217 { 6218 /* 6219 * We are merging two ack'd RSM's, 6220 * the l_rsm is on the left (lower seq 6221 * values) and the r_rsm is on the right 6222 * (higher seq value). The simplest way 6223 * to merge these is to move the right 6224 * one into the left. I don't think there 6225 * is any reason we need to try to find 6226 * the oldest (or last oldest retransmitted). 6227 */ 6228 #ifdef INVARIANTS 6229 struct rack_sendmap *rm; 6230 #endif 6231 rack_log_map_chg(rack->rc_tp, rack, NULL, 6232 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6233 l_rsm->r_end = r_rsm->r_end; 6234 if (l_rsm->r_dupack < r_rsm->r_dupack) 6235 l_rsm->r_dupack = r_rsm->r_dupack; 6236 if (r_rsm->r_rtr_bytes) 6237 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6238 if (r_rsm->r_in_tmap) { 6239 /* This really should not happen */ 6240 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6241 r_rsm->r_in_tmap = 0; 6242 } 6243 6244 /* Now the flags */ 6245 if (r_rsm->r_flags & RACK_HAS_FIN) 6246 l_rsm->r_flags |= RACK_HAS_FIN; 6247 if (r_rsm->r_flags & RACK_TLP) 6248 l_rsm->r_flags |= RACK_TLP; 6249 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6250 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6251 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6252 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6253 /* 6254 * If both are app-limited then let the 6255 * free lower the count. If right is app 6256 * limited and left is not, transfer. 6257 */ 6258 l_rsm->r_flags |= RACK_APP_LIMITED; 6259 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6260 if (r_rsm == rack->r_ctl.rc_first_appl) 6261 rack->r_ctl.rc_first_appl = l_rsm; 6262 } 6263 #ifndef INVARIANTS 6264 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6265 #else 6266 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6267 if (rm != r_rsm) { 6268 panic("removing head in rack:%p rsm:%p rm:%p", 6269 rack, r_rsm, rm); 6270 } 6271 #endif 6272 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6273 /* Transfer the split limit to the map we free */ 6274 r_rsm->r_limit_type = l_rsm->r_limit_type; 6275 l_rsm->r_limit_type = 0; 6276 } 6277 rack_free(rack, r_rsm); 6278 return (l_rsm); 6279 } 6280 6281 /* 6282 * TLP Timer, here we simply setup what segment we want to 6283 * have the TLP expire on, the normal rack_output() will then 6284 * send it out. 6285 * 6286 * We return 1, saying don't proceed with rack_output only 6287 * when all timers have been stopped (destroyed PCB?). 6288 */ 6289 static int 6290 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6291 { 6292 /* 6293 * Tail Loss Probe. 
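 * In short: after roughly two RTTs of silence we force one segment
 * out (new data if the peer's window and socket buffer allow it,
 * otherwise a retransmission of the last, or with
 * rack_always_send_oldest the oldest, un-acked segment) so that a
 * lost tail is detected by the ACK/SACK it elicits instead of waiting
 * for an RTO.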
6294 */ 6295 struct rack_sendmap *rsm = NULL; 6296 #ifdef INVARIANTS 6297 struct rack_sendmap *insret; 6298 #endif 6299 struct socket *so; 6300 uint32_t amm; 6301 uint32_t out, avail; 6302 int collapsed_win = 0; 6303 6304 if (tp->t_timers->tt_flags & TT_STOPPED) { 6305 return (1); 6306 } 6307 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6308 /* Its not time yet */ 6309 return (0); 6310 } 6311 if (ctf_progress_timeout_check(tp, true)) { 6312 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6313 return (-ETIMEDOUT); /* tcp_drop() */ 6314 } 6315 /* 6316 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6317 * need to figure out how to force a full MSS segment out. 6318 */ 6319 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6320 rack->r_ctl.retran_during_recovery = 0; 6321 rack->r_ctl.dsack_byte_cnt = 0; 6322 counter_u64_add(rack_tlp_tot, 1); 6323 if (rack->r_state && (rack->r_state != tp->t_state)) 6324 rack_set_state(tp, rack); 6325 so = tp->t_inpcb->inp_socket; 6326 avail = sbavail(&so->so_snd); 6327 out = tp->snd_max - tp->snd_una; 6328 if (out > tp->snd_wnd) { 6329 /* special case, we need a retransmission */ 6330 collapsed_win = 1; 6331 goto need_retran; 6332 } 6333 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6334 rack->r_ctl.dsack_persist--; 6335 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6336 rack->r_ctl.num_dsack = 0; 6337 } 6338 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6339 } 6340 if ((tp->t_flags & TF_GPUTINPROG) && 6341 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6342 /* 6343 * If this is the second in a row 6344 * TLP and we are doing a measurement 6345 * its time to abandon the measurement. 6346 * Something is likely broken on 6347 * the clients network and measuring a 6348 * broken network does us no good. 6349 */ 6350 tp->t_flags &= ~TF_GPUTINPROG; 6351 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6352 rack->r_ctl.rc_gp_srtt /*flex1*/, 6353 tp->gput_seq, 6354 0, 0, 18, __LINE__, NULL, 0); 6355 } 6356 /* 6357 * Check our send oldest always settings, and if 6358 * there is an oldest to send jump to the need_retran. 6359 */ 6360 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6361 goto need_retran; 6362 6363 if (avail > out) { 6364 /* New data is available */ 6365 amm = avail - out; 6366 if (amm > ctf_fixed_maxseg(tp)) { 6367 amm = ctf_fixed_maxseg(tp); 6368 if ((amm + out) > tp->snd_wnd) { 6369 /* We are rwnd limited */ 6370 goto need_retran; 6371 } 6372 } else if (amm < ctf_fixed_maxseg(tp)) { 6373 /* not enough to fill a MTU */ 6374 goto need_retran; 6375 } 6376 if (IN_FASTRECOVERY(tp->t_flags)) { 6377 /* Unlikely */ 6378 if (rack->rack_no_prr == 0) { 6379 if (out + amm <= tp->snd_wnd) { 6380 rack->r_ctl.rc_prr_sndcnt = amm; 6381 rack->r_ctl.rc_tlp_new_data = amm; 6382 rack_log_to_prr(rack, 4, 0); 6383 } 6384 } else 6385 goto need_retran; 6386 } else { 6387 /* Set the send-new override */ 6388 if (out + amm <= tp->snd_wnd) 6389 rack->r_ctl.rc_tlp_new_data = amm; 6390 else 6391 goto need_retran; 6392 } 6393 rack->r_ctl.rc_tlpsend = NULL; 6394 counter_u64_add(rack_tlp_newdata, 1); 6395 goto send; 6396 } 6397 need_retran: 6398 /* 6399 * Ok we need to arrange the last un-acked segment to be re-sent, or 6400 * optionally the first un-acked segment. 
6401 */ 6402 if (collapsed_win == 0) { 6403 if (rack_always_send_oldest) 6404 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6405 else { 6406 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6407 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6408 rsm = rack_find_high_nonack(rack, rsm); 6409 } 6410 } 6411 if (rsm == NULL) { 6412 counter_u64_add(rack_tlp_does_nada, 1); 6413 #ifdef TCP_BLACKBOX 6414 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6415 #endif 6416 goto out; 6417 } 6418 } else { 6419 /* 6420 * We must find the last segment 6421 * that was acceptable by the client. 6422 */ 6423 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6424 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6425 /* Found one */ 6426 break; 6427 } 6428 } 6429 if (rsm == NULL) { 6430 /* None? if so send the first */ 6431 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6432 if (rsm == NULL) { 6433 counter_u64_add(rack_tlp_does_nada, 1); 6434 #ifdef TCP_BLACKBOX 6435 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6436 #endif 6437 goto out; 6438 } 6439 } 6440 } 6441 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6442 /* 6443 * We need to split this the last segment in two. 6444 */ 6445 struct rack_sendmap *nrsm; 6446 6447 nrsm = rack_alloc_full_limit(rack); 6448 if (nrsm == NULL) { 6449 /* 6450 * No memory to split, we will just exit and punt 6451 * off to the RXT timer. 6452 */ 6453 counter_u64_add(rack_tlp_does_nada, 1); 6454 goto out; 6455 } 6456 rack_clone_rsm(rack, nrsm, rsm, 6457 (rsm->r_end - ctf_fixed_maxseg(tp))); 6458 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6459 #ifndef INVARIANTS 6460 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6461 #else 6462 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6463 if (insret != NULL) { 6464 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6465 nrsm, insret, rack, rsm); 6466 } 6467 #endif 6468 if (rsm->r_in_tmap) { 6469 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6470 nrsm->r_in_tmap = 1; 6471 } 6472 rsm = nrsm; 6473 } 6474 rack->r_ctl.rc_tlpsend = rsm; 6475 send: 6476 /* Make sure output path knows we are doing a TLP */ 6477 *doing_tlp = 1; 6478 rack->r_timer_override = 1; 6479 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6480 return (0); 6481 out: 6482 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6483 return (0); 6484 } 6485 6486 /* 6487 * Delayed ack Timer, here we simply need to setup the 6488 * ACK_NOW flag and remove the DELACK flag. From there 6489 * the output routine will send the ack out. 6490 * 6491 * We only return 1, saying don't proceed, if all timers 6492 * are stopped (destroyed PCB?). 6493 */ 6494 static int 6495 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6496 { 6497 if (tp->t_timers->tt_flags & TT_STOPPED) { 6498 return (1); 6499 } 6500 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6501 tp->t_flags &= ~TF_DELACK; 6502 tp->t_flags |= TF_ACKNOW; 6503 KMOD_TCPSTAT_INC(tcps_delack); 6504 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6505 return (0); 6506 } 6507 6508 /* 6509 * Persists timer, here we simply send the 6510 * same thing as a keepalive will. 6511 * the one byte send. 6512 * 6513 * We only return 1, saying don't proceed, if all timers 6514 * are stopped (destroyed PCB?). 
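 * The probe below is built with tcpip_maketemplate() and sent through
 * tcp_respond() using sequence snd_una - 1, which obliges the peer to
 * answer with an ACK carrying its current window. The forced_ack and
 * probe_not_answered bits track whether the previous probe got a reply.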
6515 */ 6516 static int 6517 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6518 { 6519 struct tcptemp *t_template; 6520 struct inpcb *inp; 6521 int32_t retval = 1; 6522 6523 inp = tp->t_inpcb; 6524 6525 if (tp->t_timers->tt_flags & TT_STOPPED) { 6526 return (1); 6527 } 6528 if (rack->rc_in_persist == 0) 6529 return (0); 6530 if (ctf_progress_timeout_check(tp, false)) { 6531 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6532 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6533 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6534 return (-ETIMEDOUT); /* tcp_drop() */ 6535 } 6536 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6537 /* 6538 * Persistence timer into zero window. Force a byte to be output, if 6539 * possible. 6540 */ 6541 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6542 /* 6543 * Hack: if the peer is dead/unreachable, we do not time out if the 6544 * window is closed. After a full backoff, drop the connection if 6545 * the idle time (no responses to probes) reaches the maximum 6546 * backoff that we would use if retransmitting. 6547 */ 6548 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6549 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6550 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6551 KMOD_TCPSTAT_INC(tcps_persistdrop); 6552 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6553 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6554 retval = -ETIMEDOUT; /* tcp_drop() */ 6555 goto out; 6556 } 6557 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6558 tp->snd_una == tp->snd_max) 6559 rack_exit_persist(tp, rack, cts); 6560 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6561 /* 6562 * If the user has closed the socket then drop a persisting 6563 * connection after a much reduced timeout. 6564 */ 6565 if (tp->t_state > TCPS_CLOSE_WAIT && 6566 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6567 KMOD_TCPSTAT_INC(tcps_persistdrop); 6568 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6569 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6570 retval = -ETIMEDOUT; /* tcp_drop() */ 6571 goto out; 6572 } 6573 t_template = tcpip_maketemplate(rack->rc_inp); 6574 if (t_template) { 6575 /* only set it if we were answered */ 6576 if (rack->forced_ack == 0) { 6577 rack->forced_ack = 1; 6578 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6579 } else { 6580 rack->probe_not_answered = 1; 6581 counter_u64_add(rack_persists_loss, 1); 6582 rack->r_ctl.persist_lost_ends++; 6583 } 6584 counter_u64_add(rack_persists_sends, 1); 6585 tcp_respond(tp, t_template->tt_ipgen, 6586 &t_template->tt_t, (struct mbuf *)NULL, 6587 tp->rcv_nxt, tp->snd_una - 1, 0); 6588 /* This sends an ack */ 6589 if (tp->t_flags & TF_DELACK) 6590 tp->t_flags &= ~TF_DELACK; 6591 free(t_template, M_TEMP); 6592 } 6593 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6594 tp->t_rxtshift++; 6595 out: 6596 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6597 rack_start_hpts_timer(rack, tp, cts, 6598 0, 0, 0); 6599 return (retval); 6600 } 6601 6602 /* 6603 * If a keepalive goes off, we had no other timers 6604 * happening. We always return 1 here since this 6605 * routine either drops the connection or sends 6606 * out a segment with respond. 
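 * The drop test below fires after TP_KEEPIDLE(tp) + TP_MAXIDLE(tp) of
 * silence; with the stock FreeBSD defaults (keepidle 7200s, keepintvl
 * 75s, keepcnt 8) that is roughly 7200 + 600 seconds, though all of
 * these are tunable so treat the numbers as illustrative only.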
6607 */ 6608 static int 6609 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6610 { 6611 struct tcptemp *t_template; 6612 struct inpcb *inp; 6613 6614 if (tp->t_timers->tt_flags & TT_STOPPED) { 6615 return (1); 6616 } 6617 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6618 inp = tp->t_inpcb; 6619 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6620 /* 6621 * Keep-alive timer went off; send something or drop connection if 6622 * idle for too long. 6623 */ 6624 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6625 if (tp->t_state < TCPS_ESTABLISHED) 6626 goto dropit; 6627 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6628 tp->t_state <= TCPS_CLOSING) { 6629 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6630 goto dropit; 6631 /* 6632 * Send a packet designed to force a response if the peer is 6633 * up and reachable: either an ACK if the connection is 6634 * still alive, or an RST if the peer has closed the 6635 * connection due to timeout or reboot. Using sequence 6636 * number tp->snd_una-1 causes the transmitted zero-length 6637 * segment to lie outside the receive window; by the 6638 * protocol spec, this requires the correspondent TCP to 6639 * respond. 6640 */ 6641 KMOD_TCPSTAT_INC(tcps_keepprobe); 6642 t_template = tcpip_maketemplate(inp); 6643 if (t_template) { 6644 if (rack->forced_ack == 0) { 6645 rack->forced_ack = 1; 6646 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6647 } else { 6648 rack->probe_not_answered = 1; 6649 } 6650 tcp_respond(tp, t_template->tt_ipgen, 6651 &t_template->tt_t, (struct mbuf *)NULL, 6652 tp->rcv_nxt, tp->snd_una - 1, 0); 6653 free(t_template, M_TEMP); 6654 } 6655 } 6656 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6657 return (1); 6658 dropit: 6659 KMOD_TCPSTAT_INC(tcps_keepdrops); 6660 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6661 return (-ETIMEDOUT); /* tcp_drop() */ 6662 } 6663 6664 /* 6665 * Retransmit helper function, clear up all the ack 6666 * flags and take care of important book keeping. 6667 */ 6668 static void 6669 rack_remxt_tmr(struct tcpcb *tp) 6670 { 6671 /* 6672 * The retransmit timer went off, all sack'd blocks must be 6673 * un-acked. 6674 */ 6675 struct rack_sendmap *rsm, *trsm = NULL; 6676 struct tcp_rack *rack; 6677 6678 rack = (struct tcp_rack *)tp->t_fb_ptr; 6679 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6680 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6681 if (rack->r_state && (rack->r_state != tp->t_state)) 6682 rack_set_state(tp, rack); 6683 /* 6684 * Ideally we would like to be able to 6685 * mark SACK-PASS on anything not acked here. 6686 * 6687 * However, if we do that we would burst out 6688 * all that data 1ms apart. This would be unwise, 6689 * so for now we will just let the normal rxt timer 6690 * and tlp timer take care of it. 6691 * 6692 * Also we really need to stick them back in sequence 6693 * order. This way we send in the proper order and any 6694 * sacks that come floating in will "re-ack" the data. 6695 * To do this we zap the tmap with an INIT and then 6696 * walk through and place every rsm in the RB tree 6697 * back in its seq ordered place. 
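 * While rebuilding the tmap we also clear RACK_ACKED and the SACK-passed
 * state (remembering prior acks in RACK_WAS_ACKED) and tag every entry
 * with RACK_MUST_RXT, since after an RTO everything outstanding is
 * treated as needing retransmission until the peer tells us otherwise.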
6698 */ 6699 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6700 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6701 rsm->r_dupack = 0; 6702 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6703 /* We must re-add it back to the tlist */ 6704 if (trsm == NULL) { 6705 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6706 } else { 6707 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6708 } 6709 rsm->r_in_tmap = 1; 6710 trsm = rsm; 6711 if (rsm->r_flags & RACK_ACKED) 6712 rsm->r_flags |= RACK_WAS_ACKED; 6713 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 6714 rsm->r_flags |= RACK_MUST_RXT; 6715 } 6716 /* Clear the count (we just un-acked them) */ 6717 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6718 rack->r_ctl.rc_sacked = 0; 6719 rack->r_ctl.rc_sacklast = NULL; 6720 rack->r_ctl.rc_agg_delayed = 0; 6721 rack->r_early = 0; 6722 rack->r_ctl.rc_agg_early = 0; 6723 rack->r_late = 0; 6724 /* Clear the tlp rtx mark */ 6725 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6726 if (rack->r_ctl.rc_resend != NULL) 6727 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6728 rack->r_ctl.rc_prr_sndcnt = 0; 6729 rack_log_to_prr(rack, 6, 0); 6730 rack->r_timer_override = 1; 6731 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6732 #ifdef NETFLIX_EXP_DETECTION 6733 || (rack->sack_attack_disable != 0) 6734 #endif 6735 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6736 /* 6737 * For non-sack customers new data 6738 * needs to go out as retransmits until 6739 * we retransmit up to snd_max. 6740 */ 6741 rack->r_must_retran = 1; 6742 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6743 rack->r_ctl.rc_sacked); 6744 } 6745 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6746 } 6747 6748 static void 6749 rack_convert_rtts(struct tcpcb *tp) 6750 { 6751 if (tp->t_srtt > 1) { 6752 uint32_t val, frac; 6753 6754 val = tp->t_srtt >> TCP_RTT_SHIFT; 6755 frac = tp->t_srtt & 0x1f; 6756 tp->t_srtt = TICKS_2_USEC(val); 6757 /* 6758 * frac is the fractional part of the srtt (if any) 6759 * but its in ticks and every bit represents 6760 * 1/32nd of a hz. 6761 */ 6762 if (frac) { 6763 if (hz == 1000) { 6764 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6765 } else { 6766 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6767 } 6768 tp->t_srtt += frac; 6769 } 6770 } 6771 if (tp->t_rttvar) { 6772 uint32_t val, frac; 6773 6774 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6775 frac = tp->t_rttvar & 0x1f; 6776 tp->t_rttvar = TICKS_2_USEC(val); 6777 /* 6778 * frac is the fractional part of the srtt (if any) 6779 * but its in ticks and every bit represents 6780 * 1/32nd of a hz. 
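 * Illustrative arithmetic for the common hz == 1000 case handled below:
 * one tick is 1000 usec, so a stored fraction of 16 (i.e. 16/32 of a
 * tick) converts to 16 * 1000 / 32 = 500 usec, which is exactly what the
 * HPTS_USEC_IN_MSEC / TCP_RTT_SCALE expression works out to.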
6781 */ 6782 if (frac) { 6783 if (hz == 1000) { 6784 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6785 } else { 6786 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6787 } 6788 tp->t_rttvar += frac; 6789 } 6790 } 6791 tp->t_rxtcur = RACK_REXMTVAL(tp); 6792 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6793 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6794 } 6795 if (tp->t_rxtcur > rack_rto_max) { 6796 tp->t_rxtcur = rack_rto_max; 6797 } 6798 } 6799 6800 static void 6801 rack_cc_conn_init(struct tcpcb *tp) 6802 { 6803 struct tcp_rack *rack; 6804 uint32_t srtt; 6805 6806 rack = (struct tcp_rack *)tp->t_fb_ptr; 6807 srtt = tp->t_srtt; 6808 cc_conn_init(tp); 6809 /* 6810 * Now convert to rack's internal format, 6811 * if required. 6812 */ 6813 if ((srtt == 0) && (tp->t_srtt != 0)) 6814 rack_convert_rtts(tp); 6815 /* 6816 * We want a chance to stay in slowstart as 6817 * we create a connection. TCP spec says that 6818 * initially ssthresh is infinite. For our 6819 * purposes that is the snd_wnd. 6820 */ 6821 if (tp->snd_ssthresh < tp->snd_wnd) { 6822 tp->snd_ssthresh = tp->snd_wnd; 6823 } 6824 /* 6825 * We also want to assure a IW worth of 6826 * data can get inflight. 6827 */ 6828 if (rc_init_window(rack) < tp->snd_cwnd) 6829 tp->snd_cwnd = rc_init_window(rack); 6830 } 6831 6832 /* 6833 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6834 * we will setup to retransmit the lowest seq number outstanding. 6835 */ 6836 static int 6837 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6838 { 6839 int32_t rexmt; 6840 int32_t retval = 0; 6841 bool isipv6; 6842 6843 if (tp->t_timers->tt_flags & TT_STOPPED) { 6844 return (1); 6845 } 6846 if ((tp->t_flags & TF_GPUTINPROG) && 6847 (tp->t_rxtshift)) { 6848 /* 6849 * We have had a second timeout 6850 * measurements on successive rxt's are not profitable. 6851 * It is unlikely to be of any use (the network is 6852 * broken or the client went away). 6853 */ 6854 tp->t_flags &= ~TF_GPUTINPROG; 6855 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6856 rack->r_ctl.rc_gp_srtt /*flex1*/, 6857 tp->gput_seq, 6858 0, 0, 18, __LINE__, NULL, 0); 6859 } 6860 if (ctf_progress_timeout_check(tp, false)) { 6861 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6862 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6863 return (-ETIMEDOUT); /* tcp_drop() */ 6864 } 6865 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6866 rack->r_ctl.retran_during_recovery = 0; 6867 rack->r_ctl.dsack_byte_cnt = 0; 6868 if (IN_FASTRECOVERY(tp->t_flags)) 6869 tp->t_flags |= TF_WASFRECOVERY; 6870 else 6871 tp->t_flags &= ~TF_WASFRECOVERY; 6872 if (IN_CONGRECOVERY(tp->t_flags)) 6873 tp->t_flags |= TF_WASCRECOVERY; 6874 else 6875 tp->t_flags &= ~TF_WASCRECOVERY; 6876 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6877 (tp->snd_una == tp->snd_max)) { 6878 /* Nothing outstanding .. nothing to do */ 6879 return (0); 6880 } 6881 if (rack->r_ctl.dsack_persist) { 6882 rack->r_ctl.dsack_persist--; 6883 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6884 rack->r_ctl.num_dsack = 0; 6885 } 6886 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6887 } 6888 /* 6889 * Rack can only run one timer at a time, so we cannot 6890 * run a KEEPINIT (gating SYN sending) and a retransmit 6891 * timer for the SYN. 
So if we are in a front state and 6892 * have a KEEPINIT timer we need to check the first transmit 6893 * against now to see if we have exceeded the KEEPINIT time 6894 * (if one is set). 6895 */ 6896 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6897 (TP_KEEPINIT(tp) != 0)) { 6898 struct rack_sendmap *rsm; 6899 6900 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6901 if (rsm) { 6902 /* Ok we have something outstanding to test keepinit with */ 6903 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6904 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6905 /* We have exceeded the KEEPINIT time */ 6906 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6907 goto drop_it; 6908 } 6909 } 6910 } 6911 /* 6912 * Retransmission timer went off. Message has not been acked within 6913 * retransmit interval. Back off to a longer retransmit interval 6914 * and retransmit one segment. 6915 */ 6916 rack_remxt_tmr(tp); 6917 if ((rack->r_ctl.rc_resend == NULL) || 6918 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6919 /* 6920 * If the rwnd collapsed on 6921 * the one we are retransmitting 6922 * it does not count against the 6923 * rxt count. 6924 */ 6925 tp->t_rxtshift++; 6926 } 6927 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6928 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6929 drop_it: 6930 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6931 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6932 /* XXXGL: previously t_softerror was casted to uint16_t */ 6933 MPASS(tp->t_softerror >= 0); 6934 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6935 goto out; /* tcp_drop() */ 6936 } 6937 if (tp->t_state == TCPS_SYN_SENT) { 6938 /* 6939 * If the SYN was retransmitted, indicate CWND to be limited 6940 * to 1 segment in cc_conn_init(). 6941 */ 6942 tp->snd_cwnd = 1; 6943 } else if (tp->t_rxtshift == 1) { 6944 /* 6945 * first retransmit; record ssthresh and cwnd so they can be 6946 * recovered if this turns out to be a "bad" retransmit. A 6947 * retransmit is considered "bad" if an ACK for this segment 6948 * is received within RTT/2 interval; the assumption here is 6949 * that the ACK was already in flight. See "On Estimating 6950 * End-to-End Network Path Properties" by Allman and Paxson 6951 * for more details. 6952 */ 6953 tp->snd_cwnd_prev = tp->snd_cwnd; 6954 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6955 tp->snd_recover_prev = tp->snd_recover; 6956 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6957 tp->t_flags |= TF_PREVVALID; 6958 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6959 tp->t_flags &= ~TF_PREVVALID; 6960 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6961 if ((tp->t_state == TCPS_SYN_SENT) || 6962 (tp->t_state == TCPS_SYN_RECEIVED)) 6963 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6964 else 6965 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6966 6967 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6968 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6969 /* 6970 * We enter the path for PLMTUD if connection is established or, if 6971 * connection is FIN_WAIT_1 status, reason for the last is that if 6972 * amount of data we send is very small, we could send it in couple 6973 * of packets and process straight to FIN. In that case we won't 6974 * catch ESTABLISHED state. 6975 */ 6976 #ifdef INET6 6977 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 
true : false; 6978 #else 6979 isipv6 = false; 6980 #endif 6981 if (((V_tcp_pmtud_blackhole_detect == 1) || 6982 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6983 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6984 ((tp->t_state == TCPS_ESTABLISHED) || 6985 (tp->t_state == TCPS_FIN_WAIT_1))) { 6986 /* 6987 * Idea here is that at each stage of mtu probe (usually, 6988 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6989 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6990 * should take care of that. 6991 */ 6992 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6993 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6994 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6995 tp->t_rxtshift % 2 == 0)) { 6996 /* 6997 * Enter Path MTU Black-hole Detection mechanism: - 6998 * Disable Path MTU Discovery (IP "DF" bit). - 6999 * Reduce MTU to lower value than what we negotiated 7000 * with peer. 7001 */ 7002 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 7003 /* Record that we may have found a black hole. */ 7004 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 7005 /* Keep track of previous MSS. */ 7006 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 7007 } 7008 7009 /* 7010 * Reduce the MSS to blackhole value or to the 7011 * default in an attempt to retransmit. 7012 */ 7013 #ifdef INET6 7014 if (isipv6 && 7015 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 7016 /* Use the sysctl tuneable blackhole MSS. */ 7017 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 7018 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7019 } else if (isipv6) { 7020 /* Use the default MSS. */ 7021 tp->t_maxseg = V_tcp_v6mssdflt; 7022 /* 7023 * Disable Path MTU Discovery when we switch 7024 * to minmss. 7025 */ 7026 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7027 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7028 } 7029 #endif 7030 #if defined(INET6) && defined(INET) 7031 else 7032 #endif 7033 #ifdef INET 7034 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 7035 /* Use the sysctl tuneable blackhole MSS. */ 7036 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7037 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7038 } else { 7039 /* Use the default MSS. */ 7040 tp->t_maxseg = V_tcp_mssdflt; 7041 /* 7042 * Disable Path MTU Discovery when we switch 7043 * to minmss. 7044 */ 7045 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7046 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7047 } 7048 #endif 7049 } else { 7050 /* 7051 * If further retransmissions are still unsuccessful 7052 * with a lowered MTU, maybe this isn't a blackhole 7053 * and we restore the previous MSS and blackhole 7054 * detection flags. The limit '6' is determined by 7055 * giving each probe stage (1448, 1188, 524) 2 7056 * chances to recover. 7057 */ 7058 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7059 (tp->t_rxtshift >= 6)) { 7060 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7061 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7062 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7063 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7064 } 7065 } 7066 } 7067 /* 7068 * Disable RFC1323 and SACK if we haven't got any response to 7069 * our third SYN to work-around some broken terminal servers 7070 * (most of which have hopefully been retired) that have bad VJ 7071 * header compression code which trashes TCP segments containing 7072 * unknown-to-them TCP options. 
7073 */ 7074 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7075 (tp->t_rxtshift == 3)) 7076 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7077 /* 7078 * If we backed off this far, our srtt estimate is probably bogus. 7079 * Clobber it so we'll take the next rtt measurement as our srtt; 7080 * move the current srtt into rttvar to keep the current retransmit 7081 * times until then. 7082 */ 7083 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7084 #ifdef INET6 7085 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 7086 in6_losing(tp->t_inpcb); 7087 else 7088 #endif 7089 in_losing(tp->t_inpcb); 7090 tp->t_rttvar += tp->t_srtt; 7091 tp->t_srtt = 0; 7092 } 7093 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7094 tp->snd_recover = tp->snd_max; 7095 tp->t_flags |= TF_ACKNOW; 7096 tp->t_rtttime = 0; 7097 rack_cong_signal(tp, CC_RTO, tp->snd_una); 7098 out: 7099 return (retval); 7100 } 7101 7102 static int 7103 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7104 { 7105 int32_t ret = 0; 7106 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7107 7108 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7109 (tp->t_flags & TF_GPUTINPROG)) { 7110 /* 7111 * We have a goodput in progress 7112 * and we have entered a late state. 7113 * Do we have enough data in the sb 7114 * to handle the GPUT request? 7115 */ 7116 uint32_t bytes; 7117 7118 bytes = tp->gput_ack - tp->gput_seq; 7119 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7120 bytes += tp->gput_seq - tp->snd_una; 7121 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 7122 /* 7123 * There are not enough bytes in the socket 7124 * buffer that have been sent to cover this 7125 * measurement. Cancel it. 7126 */ 7127 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7128 rack->r_ctl.rc_gp_srtt /*flex1*/, 7129 tp->gput_seq, 7130 0, 0, 18, __LINE__, NULL, 0); 7131 tp->t_flags &= ~TF_GPUTINPROG; 7132 } 7133 } 7134 if (timers == 0) { 7135 return (0); 7136 } 7137 if (tp->t_state == TCPS_LISTEN) { 7138 /* no timers on listen sockets */ 7139 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7140 return (0); 7141 return (1); 7142 } 7143 if ((timers & PACE_TMR_RACK) && 7144 rack->rc_on_min_to) { 7145 /* 7146 * For the rack timer when we 7147 * are on a min-timeout (which means rrr_conf = 3) 7148 * we don't want to check the timer. It may 7149 * be going off for a pace and thats ok we 7150 * want to send the retransmit (if its ready). 7151 * 7152 * If its on a normal rack timer (non-min) then 7153 * we will check if its expired. 7154 */ 7155 goto skip_time_check; 7156 } 7157 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7158 uint32_t left; 7159 7160 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 7161 ret = -1; 7162 rack_log_to_processing(rack, cts, ret, 0); 7163 return (0); 7164 } 7165 if (hpts_calling == 0) { 7166 /* 7167 * A user send or queued mbuf (sack) has called us? We 7168 * return 0 and let the pacing guards 7169 * deal with it if they should or 7170 * should not cause a send. 7171 */ 7172 ret = -2; 7173 rack_log_to_processing(rack, cts, ret, 0); 7174 return (0); 7175 } 7176 /* 7177 * Ok our timer went off early and we are not paced false 7178 * alarm, go back to sleep. 
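 * We work out how much time is left until rc_timer_exp and re-insert
 * ourselves into the hpts wheel for that remaining amount, so the timer
 * is processed again once it has genuinely expired.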
7179 */ 7180 ret = -3; 7181 left = rack->r_ctl.rc_timer_exp - cts; 7182 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 7183 rack_log_to_processing(rack, cts, ret, left); 7184 return (1); 7185 } 7186 skip_time_check: 7187 rack->rc_tmr_stopped = 0; 7188 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 7189 if (timers & PACE_TMR_DELACK) { 7190 ret = rack_timeout_delack(tp, rack, cts); 7191 } else if (timers & PACE_TMR_RACK) { 7192 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7193 rack->r_fast_output = 0; 7194 ret = rack_timeout_rack(tp, rack, cts); 7195 } else if (timers & PACE_TMR_TLP) { 7196 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7197 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 7198 } else if (timers & PACE_TMR_RXT) { 7199 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7200 rack->r_fast_output = 0; 7201 ret = rack_timeout_rxt(tp, rack, cts); 7202 } else if (timers & PACE_TMR_PERSIT) { 7203 ret = rack_timeout_persist(tp, rack, cts); 7204 } else if (timers & PACE_TMR_KEEP) { 7205 ret = rack_timeout_keepalive(tp, rack, cts); 7206 } 7207 rack_log_to_processing(rack, cts, ret, timers); 7208 return (ret); 7209 } 7210 7211 static void 7212 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 7213 { 7214 struct timeval tv; 7215 uint32_t us_cts, flags_on_entry; 7216 uint8_t hpts_removed = 0; 7217 7218 flags_on_entry = rack->r_ctl.rc_hpts_flags; 7219 us_cts = tcp_get_usecs(&tv); 7220 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 7221 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 7222 ((tp->snd_max - tp->snd_una) == 0))) { 7223 tcp_hpts_remove(rack->rc_inp); 7224 hpts_removed = 1; 7225 /* If we were not delayed cancel out the flag. */ 7226 if ((tp->snd_max - tp->snd_una) == 0) 7227 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7228 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7229 } 7230 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7231 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7232 if (tcp_in_hpts(rack->rc_inp) && 7233 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 7234 /* 7235 * Canceling timer's when we have no output being 7236 * paced. We also must remove ourselves from the 7237 * hpts. 7238 */ 7239 tcp_hpts_remove(rack->rc_inp); 7240 hpts_removed = 1; 7241 } 7242 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7243 } 7244 if (hpts_removed == 0) 7245 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7246 } 7247 7248 static void 7249 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 7250 { 7251 return; 7252 } 7253 7254 static int 7255 rack_stopall(struct tcpcb *tp) 7256 { 7257 struct tcp_rack *rack; 7258 rack = (struct tcp_rack *)tp->t_fb_ptr; 7259 rack->t_timers_stopped = 1; 7260 return (0); 7261 } 7262 7263 static void 7264 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7265 { 7266 return; 7267 } 7268 7269 static int 7270 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7271 { 7272 return (0); 7273 } 7274 7275 static void 7276 rack_stop_all_timers(struct tcpcb *tp) 7277 { 7278 struct tcp_rack *rack; 7279 7280 /* 7281 * Assure no timers are running. 
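 * If the legacy persist timer happens to be armed we note that by
 * setting rc_in_persist before suspending the callout based timers, so
 * that rack carries the persist state forward instead of losing it.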
7282 */ 7283 if (tcp_timer_active(tp, TT_PERSIST)) { 7284 /* We enter in persists, set the flag appropriately */ 7285 rack = (struct tcp_rack *)tp->t_fb_ptr; 7286 rack->rc_in_persist = 1; 7287 } 7288 tcp_timer_suspend(tp, TT_PERSIST); 7289 tcp_timer_suspend(tp, TT_REXMT); 7290 tcp_timer_suspend(tp, TT_KEEP); 7291 tcp_timer_suspend(tp, TT_DELACK); 7292 } 7293 7294 static void 7295 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7296 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7297 { 7298 int32_t idx; 7299 7300 rsm->r_rtr_cnt++; 7301 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7302 rsm->r_dupack = 0; 7303 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7304 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7305 rsm->r_flags |= RACK_OVERMAX; 7306 } 7307 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7308 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7309 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7310 } 7311 idx = rsm->r_rtr_cnt - 1; 7312 rsm->r_tim_lastsent[idx] = ts; 7313 /* 7314 * Here we don't add in the len of send, since its already 7315 * in snduna <->snd_max. 7316 */ 7317 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7318 rack->r_ctl.rc_sacked); 7319 if (rsm->r_flags & RACK_ACKED) { 7320 /* Problably MTU discovery messing with us */ 7321 rsm->r_flags &= ~RACK_ACKED; 7322 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7323 } 7324 if (rsm->r_in_tmap) { 7325 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7326 rsm->r_in_tmap = 0; 7327 } 7328 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7329 rsm->r_in_tmap = 1; 7330 if (rsm->r_flags & RACK_SACK_PASSED) { 7331 /* We have retransmitted due to the SACK pass */ 7332 rsm->r_flags &= ~RACK_SACK_PASSED; 7333 rsm->r_flags |= RACK_WAS_SACKPASS; 7334 } 7335 } 7336 7337 static uint32_t 7338 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7339 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7340 { 7341 /* 7342 * We (re-)transmitted starting at rsm->r_start for some length 7343 * (possibly less than r_end. 7344 */ 7345 struct rack_sendmap *nrsm; 7346 #ifdef INVARIANTS 7347 struct rack_sendmap *insret; 7348 #endif 7349 uint32_t c_end; 7350 int32_t len; 7351 7352 len = *lenp; 7353 c_end = rsm->r_start + len; 7354 if (SEQ_GEQ(c_end, rsm->r_end)) { 7355 /* 7356 * We retransmitted the whole piece or more than the whole 7357 * slopping into the next rsm. 7358 */ 7359 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7360 if (c_end == rsm->r_end) { 7361 *lenp = 0; 7362 return (0); 7363 } else { 7364 int32_t act_len; 7365 7366 /* Hangs over the end return whats left */ 7367 act_len = rsm->r_end - rsm->r_start; 7368 *lenp = (len - act_len); 7369 return (rsm->r_end); 7370 } 7371 /* We don't get out of this block. */ 7372 } 7373 /* 7374 * Here we retransmitted less than the whole thing which means we 7375 * have to split this into what was transmitted and what was not. 7376 */ 7377 nrsm = rack_alloc_full_limit(rack); 7378 if (nrsm == NULL) { 7379 /* 7380 * We can't get memory, so lets not proceed. 7381 */ 7382 *lenp = 0; 7383 return (0); 7384 } 7385 /* 7386 * So here we are going to take the original rsm and make it what we 7387 * retransmitted. nrsm will be the tail portion we did not 7388 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7389 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7390 * 1, 6 and the new piece will be 6, 11. 
7391 */ 7392 rack_clone_rsm(rack, nrsm, rsm, c_end); 7393 nrsm->r_dupack = 0; 7394 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7395 #ifndef INVARIANTS 7396 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7397 #else 7398 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7399 if (insret != NULL) { 7400 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7401 nrsm, insret, rack, rsm); 7402 } 7403 #endif 7404 if (rsm->r_in_tmap) { 7405 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7406 nrsm->r_in_tmap = 1; 7407 } 7408 rsm->r_flags &= (~RACK_HAS_FIN); 7409 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7410 /* Log a split of rsm into rsm and nrsm */ 7411 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7412 *lenp = 0; 7413 return (0); 7414 } 7415 7416 static void 7417 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7418 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7419 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7420 { 7421 struct tcp_rack *rack; 7422 struct rack_sendmap *rsm, *nrsm, fe; 7423 #ifdef INVARIANTS 7424 struct rack_sendmap *insret; 7425 #endif 7426 register uint32_t snd_max, snd_una; 7427 7428 /* 7429 * Add to the RACK log of packets in flight or retransmitted. If 7430 * there is a TS option we will use the TS echoed, if not we will 7431 * grab a TS. 7432 * 7433 * Retransmissions will increment the count and move the ts to its 7434 * proper place. Note that if options do not include TS's then we 7435 * won't be able to effectively use the ACK for an RTT on a retran. 7436 * 7437 * Notes about r_start and r_end. Lets consider a send starting at 7438 * sequence 1 for 10 bytes. In such an example the r_start would be 7439 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7440 * This means that r_end is actually the first sequence for the next 7441 * slot (11). 7442 * 7443 */ 7444 /* 7445 * If err is set what do we do XXXrrs? should we not add the thing? 7446 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7447 * i.e. proceed with add ** do this for now. 7448 */ 7449 INP_WLOCK_ASSERT(tp->t_inpcb); 7450 if (err) 7451 /* 7452 * We don't log errors -- we could but snd_max does not 7453 * advance in this case either. 7454 */ 7455 return; 7456 7457 if (th_flags & TH_RST) { 7458 /* 7459 * We don't log resets and we return immediately from 7460 * sending 7461 */ 7462 return; 7463 } 7464 rack = (struct tcp_rack *)tp->t_fb_ptr; 7465 snd_una = tp->snd_una; 7466 snd_max = tp->snd_max; 7467 if (th_flags & (TH_SYN | TH_FIN)) { 7468 /* 7469 * The call to rack_log_output is made before bumping 7470 * snd_max. This means we can record one extra byte on a SYN 7471 * or FIN if seq_out is adding more on and a FIN is present 7472 * (and we are not resending). 7473 */ 7474 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7475 len++; 7476 if (th_flags & TH_FIN) 7477 len++; 7478 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7479 /* 7480 * The add/update as not been done for the FIN/SYN 7481 * yet. 7482 */ 7483 snd_max = tp->snd_nxt; 7484 } 7485 } 7486 if (SEQ_LEQ((seq_out + len), snd_una)) { 7487 /* Are sending an old segment to induce an ack (keep-alive)? */ 7488 return; 7489 } 7490 if (SEQ_LT(seq_out, snd_una)) { 7491 /* huh? should we panic? 
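 * For now we do not panic, we simply clip off the part that is already
 * acked: advance seq_out to snd_una and shrink len to whatever (if
 * anything) still lies beyond it.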
*/ 7492 uint32_t end; 7493 7494 end = seq_out + len; 7495 seq_out = snd_una; 7496 if (SEQ_GEQ(end, seq_out)) 7497 len = end - seq_out; 7498 else 7499 len = 0; 7500 } 7501 if (len == 0) { 7502 /* We don't log zero window probes */ 7503 return; 7504 } 7505 rack->r_ctl.rc_time_last_sent = cts; 7506 if (IN_FASTRECOVERY(tp->t_flags)) { 7507 rack->r_ctl.rc_prr_out += len; 7508 } 7509 /* First question is it a retransmission or new? */ 7510 if (seq_out == snd_max) { 7511 /* Its new */ 7512 again: 7513 rsm = rack_alloc(rack); 7514 if (rsm == NULL) { 7515 /* 7516 * Hmm out of memory and the tcb got destroyed while 7517 * we tried to wait. 7518 */ 7519 return; 7520 } 7521 if (th_flags & TH_FIN) { 7522 rsm->r_flags = RACK_HAS_FIN|add_flag; 7523 } else { 7524 rsm->r_flags = add_flag; 7525 } 7526 if (hw_tls) 7527 rsm->r_hw_tls = 1; 7528 rsm->r_tim_lastsent[0] = cts; 7529 rsm->r_rtr_cnt = 1; 7530 rsm->r_rtr_bytes = 0; 7531 if (th_flags & TH_SYN) { 7532 /* The data space is one beyond snd_una */ 7533 rsm->r_flags |= RACK_HAS_SYN; 7534 } 7535 rsm->r_start = seq_out; 7536 rsm->r_end = rsm->r_start + len; 7537 rsm->r_dupack = 0; 7538 /* 7539 * save off the mbuf location that 7540 * sndmbuf_noadv returned (which is 7541 * where we started copying from).. 7542 */ 7543 rsm->m = s_mb; 7544 rsm->soff = s_moff; 7545 /* 7546 * Here we do add in the len of send, since its not yet 7547 * reflected in in snduna <->snd_max 7548 */ 7549 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7550 rack->r_ctl.rc_sacked) + 7551 (rsm->r_end - rsm->r_start)); 7552 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7553 if (rsm->m) { 7554 if (rsm->m->m_len <= rsm->soff) { 7555 /* 7556 * XXXrrs Question, will this happen? 7557 * 7558 * If sbsndptr is set at the correct place 7559 * then s_moff should always be somewhere 7560 * within rsm->m. But if the sbsndptr was 7561 * off then that won't be true. If it occurs 7562 * we need to walkout to the correct location. 7563 */ 7564 struct mbuf *lm; 7565 7566 lm = rsm->m; 7567 while (lm->m_len <= rsm->soff) { 7568 rsm->soff -= lm->m_len; 7569 lm = lm->m_next; 7570 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7571 __func__, rack, s_moff, s_mb, rsm->soff)); 7572 } 7573 rsm->m = lm; 7574 counter_u64_add(rack_sbsndptr_wrong, 1); 7575 } else 7576 counter_u64_add(rack_sbsndptr_right, 1); 7577 rsm->orig_m_len = rsm->m->m_len; 7578 } else 7579 rsm->orig_m_len = 0; 7580 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7581 /* Log a new rsm */ 7582 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7583 #ifndef INVARIANTS 7584 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7585 #else 7586 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7587 if (insret != NULL) { 7588 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7589 nrsm, insret, rack, rsm); 7590 } 7591 #endif 7592 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7593 rsm->r_in_tmap = 1; 7594 /* 7595 * Special case detection, is there just a single 7596 * packet outstanding when we are not in recovery? 7597 * 7598 * If this is true mark it so. 
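 * The r_one_out_nr mark placed on the previous rsm is consulted later
 * when judging how much to trust an RTT sample taken from it (see
 * tcp_rack_xmit_timer()), since a lone packet in flight tells us little
 * about buffering along the path.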
7599 */ 7600 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7601 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7602 struct rack_sendmap *prsm; 7603 7604 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7605 if (prsm) 7606 prsm->r_one_out_nr = 1; 7607 } 7608 return; 7609 } 7610 /* 7611 * If we reach here its a retransmission and we need to find it. 7612 */ 7613 memset(&fe, 0, sizeof(fe)); 7614 more: 7615 if (hintrsm && (hintrsm->r_start == seq_out)) { 7616 rsm = hintrsm; 7617 hintrsm = NULL; 7618 } else { 7619 /* No hints sorry */ 7620 rsm = NULL; 7621 } 7622 if ((rsm) && (rsm->r_start == seq_out)) { 7623 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7624 if (len == 0) { 7625 return; 7626 } else { 7627 goto more; 7628 } 7629 } 7630 /* Ok it was not the last pointer go through it the hard way. */ 7631 refind: 7632 fe.r_start = seq_out; 7633 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7634 if (rsm) { 7635 if (rsm->r_start == seq_out) { 7636 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7637 if (len == 0) { 7638 return; 7639 } else { 7640 goto refind; 7641 } 7642 } 7643 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7644 /* Transmitted within this piece */ 7645 /* 7646 * Ok we must split off the front and then let the 7647 * update do the rest 7648 */ 7649 nrsm = rack_alloc_full_limit(rack); 7650 if (nrsm == NULL) { 7651 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7652 return; 7653 } 7654 /* 7655 * copy rsm to nrsm and then trim the front of rsm 7656 * to not include this part. 7657 */ 7658 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7659 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7660 #ifndef INVARIANTS 7661 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7662 #else 7663 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7664 if (insret != NULL) { 7665 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7666 nrsm, insret, rack, rsm); 7667 } 7668 #endif 7669 if (rsm->r_in_tmap) { 7670 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7671 nrsm->r_in_tmap = 1; 7672 } 7673 rsm->r_flags &= (~RACK_HAS_FIN); 7674 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7675 if (len == 0) { 7676 return; 7677 } else if (len > 0) 7678 goto refind; 7679 } 7680 } 7681 /* 7682 * Hmm not found in map did they retransmit both old and on into the 7683 * new? 7684 */ 7685 if (seq_out == tp->snd_max) { 7686 goto again; 7687 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7688 #ifdef INVARIANTS 7689 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7690 seq_out, len, tp->snd_una, tp->snd_max); 7691 printf("Starting Dump of all rack entries\n"); 7692 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7693 printf("rsm:%p start:%u end:%u\n", 7694 rsm, rsm->r_start, rsm->r_end); 7695 } 7696 printf("Dump complete\n"); 7697 panic("seq_out not found rack:%p tp:%p", 7698 rack, tp); 7699 #endif 7700 } else { 7701 #ifdef INVARIANTS 7702 /* 7703 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7704 * flag) 7705 */ 7706 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7707 seq_out, len, tp->snd_max, tp); 7708 #endif 7709 } 7710 } 7711 7712 /* 7713 * Record one of the RTT updates from an ack into 7714 * our sample structure. 
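 * The confidence argument is 2 for a SACK'd sample, 1 for a cum-ack we
 * believe, and 0 when we do not trust the sample (just-return sends, a
 * lone outstanding packet, or app-limited cases). Each ack accumulates
 * the lowest/highest/total rtt here and the result is folded into the
 * srtt by tcp_rack_xmit_timer_commit().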
7715 */ 7716 7717 static void 7718 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7719 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7720 { 7721 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7722 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7723 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7724 } 7725 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7726 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7727 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7728 } 7729 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7730 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7731 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7732 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7733 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7734 } 7735 if ((confidence == 1) && 7736 ((rsm == NULL) || 7737 (rsm->r_just_ret) || 7738 (rsm->r_one_out_nr && 7739 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7740 /* 7741 * If the rsm had a just return 7742 * hit it then we can't trust the 7743 * rtt measurement for buffer deterimination 7744 * Note that a confidence of 2, indicates 7745 * SACK'd which overrides the r_just_ret or 7746 * the r_one_out_nr. If it was a CUM-ACK and 7747 * we had only two outstanding, but get an 7748 * ack for only 1. Then that also lowers our 7749 * confidence. 7750 */ 7751 confidence = 0; 7752 } 7753 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7754 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7755 if (rack->r_ctl.rack_rs.confidence == 0) { 7756 /* 7757 * We take anything with no current confidence 7758 * saved. 7759 */ 7760 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7761 rack->r_ctl.rack_rs.confidence = confidence; 7762 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7763 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7764 /* 7765 * Once we have a confident number, 7766 * we can update it with a smaller 7767 * value since this confident number 7768 * may include the DSACK time until 7769 * the next segment (the second one) arrived. 7770 */ 7771 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7772 rack->r_ctl.rack_rs.confidence = confidence; 7773 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7774 } 7775 } 7776 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7777 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7778 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7779 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7780 } 7781 7782 /* 7783 * Collect new round-trip time estimate 7784 * and update averages and current timeout. 
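 * Which of the per-ack samples gets fed into the smoothing depends on
 * rc_rate_sample_method: the lowest, the highest, or the average of the
 * RTTs seen in the ack (USE_RTT_LOW / USE_RTT_HIGH / USE_RTT_AVG).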
7785 */ 7786 static void 7787 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7788 { 7789 int32_t delta; 7790 int32_t rtt; 7791 7792 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7793 /* No valid sample */ 7794 return; 7795 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7796 /* We are to use the lowest RTT seen in a single ack */ 7797 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7798 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7799 /* We are to use the highest RTT seen in a single ack */ 7800 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7801 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7802 /* We are to use the average RTT seen in a single ack */ 7803 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7804 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7805 } else { 7806 #ifdef INVARIANTS 7807 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7808 #endif 7809 return; 7810 } 7811 if (rtt == 0) 7812 rtt = 1; 7813 if (rack->rc_gp_rtt_set == 0) { 7814 /* 7815 * With no RTT we have to accept 7816 * even one we are not confident of. 7817 */ 7818 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7819 rack->rc_gp_rtt_set = 1; 7820 } else if (rack->r_ctl.rack_rs.confidence) { 7821 /* update the running gp srtt */ 7822 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7823 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7824 } 7825 if (rack->r_ctl.rack_rs.confidence) { 7826 /* 7827 * record the low and high for highly buffered path computation, 7828 * we only do this if we are confident (not a retransmission). 7829 */ 7830 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7831 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7832 } 7833 if (rack->rc_highly_buffered == 0) { 7834 /* 7835 * Currently once we declare a path has 7836 * highly buffered there is no going 7837 * back, which may be a problem... 7838 */ 7839 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7840 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7841 rack->r_ctl.rc_highest_us_rtt, 7842 rack->r_ctl.rc_lowest_us_rtt, 7843 RACK_RTTS_SEEHBP); 7844 rack->rc_highly_buffered = 1; 7845 } 7846 } 7847 } 7848 if ((rack->r_ctl.rack_rs.confidence) || 7849 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7850 /* 7851 * If we are highly confident of it <or> it was 7852 * never retransmitted we accept it as the last us_rtt. 7853 */ 7854 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7855 /* The lowest rtt can be set if its was not retransmited */ 7856 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7857 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7858 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7859 rack->r_ctl.rc_lowest_us_rtt = 1; 7860 } 7861 } 7862 rack = (struct tcp_rack *)tp->t_fb_ptr; 7863 if (tp->t_srtt != 0) { 7864 /* 7865 * We keep a simple srtt in microseconds, like our rtt 7866 * measurement. We don't need to do any tricks with shifting 7867 * etc. Instead we just add in 1/8th of the new measurement 7868 * and subtract out 1/8 of the old srtt. We do the same with 7869 * the variance after finding the absolute value of the 7870 * difference between this sample and the current srtt. 
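 * Purely as an illustration, with made-up numbers: srtt = 8000 usec,
 * rttvar = 2000 usec and a new sample of 4000 usec gives
 *   delta  = |8000 - 4000|          = 4000
 *   srtt   = 8000 - 8000/8 + 4000/8 = 7500
 *   rttvar = 2000 - 2000/8 + 4000/8 = 2250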
7871 */ 7872 delta = tp->t_srtt - rtt; 7873 /* Take off 1/8th of the current sRTT */ 7874 tp->t_srtt -= (tp->t_srtt >> 3); 7875 /* Add in 1/8th of the new RTT just measured */ 7876 tp->t_srtt += (rtt >> 3); 7877 if (tp->t_srtt <= 0) 7878 tp->t_srtt = 1; 7879 /* Now lets make the absolute value of the variance */ 7880 if (delta < 0) 7881 delta = -delta; 7882 /* Subtract out 1/8th */ 7883 tp->t_rttvar -= (tp->t_rttvar >> 3); 7884 /* Add in 1/8th of the new variance we just saw */ 7885 tp->t_rttvar += (delta >> 3); 7886 if (tp->t_rttvar <= 0) 7887 tp->t_rttvar = 1; 7888 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7889 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7890 } else { 7891 /* 7892 * No rtt measurement yet - use the unsmoothed rtt. Set the 7893 * variance to half the rtt (so our first retransmit happens 7894 * at 3*rtt). 7895 */ 7896 tp->t_srtt = rtt; 7897 tp->t_rttvar = rtt >> 1; 7898 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7899 } 7900 rack->rc_srtt_measure_made = 1; 7901 KMOD_TCPSTAT_INC(tcps_rttupdated); 7902 tp->t_rttupdated++; 7903 #ifdef STATS 7904 if (rack_stats_gets_ms_rtt == 0) { 7905 /* Send in the microsecond rtt used for rxt timeout purposes */ 7906 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7907 } else if (rack_stats_gets_ms_rtt == 1) { 7908 /* Send in the millisecond rtt used for rxt timeout purposes */ 7909 int32_t ms_rtt; 7910 7911 /* Round up */ 7912 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7913 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7914 } else if (rack_stats_gets_ms_rtt == 2) { 7915 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7916 int32_t ms_rtt; 7917 7918 /* Round up */ 7919 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7920 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7921 } else { 7922 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7923 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7924 } 7925 7926 #endif 7927 /* 7928 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7929 * way we do the smoothing, srtt and rttvar will each average +1/2 7930 * tick of bias. When we compute the retransmit timer, we want 1/2 7931 * tick of rounding and 1 extra tick because of +-1/2 tick 7932 * uncertainty in the firing of the timer. The bias will give us 7933 * exactly the 1.5 tick we need. But, because the bias is 7934 * statistical, we have to test that we don't drop below the minimum 7935 * feasible timer (which is 2 ticks). 7936 */ 7937 tp->t_rxtshift = 0; 7938 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7939 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7940 rack_log_rtt_sample(rack, rtt); 7941 tp->t_softerror = 0; 7942 } 7943 7944 7945 static void 7946 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7947 { 7948 /* 7949 * Apply to filter the inbound us-rtt at us_cts. 
7950 */ 7951 uint32_t old_rtt; 7952 7953 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7954 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7955 us_rtt, us_cts); 7956 if (rack->r_ctl.last_pacing_time && 7957 rack->rc_gp_dyn_mul && 7958 (rack->r_ctl.last_pacing_time > us_rtt)) 7959 rack->pacing_longer_than_rtt = 1; 7960 else 7961 rack->pacing_longer_than_rtt = 0; 7962 if (old_rtt > us_rtt) { 7963 /* We just hit a new lower rtt time */ 7964 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7965 __LINE__, RACK_RTTS_NEWRTT); 7966 /* 7967 * Only count it if its lower than what we saw within our 7968 * calculated range. 7969 */ 7970 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7971 if (rack_probertt_lower_within && 7972 rack->rc_gp_dyn_mul && 7973 (rack->use_fixed_rate == 0) && 7974 (rack->rc_always_pace)) { 7975 /* 7976 * We are seeing a new lower rtt very close 7977 * to the time that we would have entered probe-rtt. 7978 * This is probably due to the fact that a peer flow 7979 * has entered probe-rtt. Lets go in now too. 7980 */ 7981 uint32_t val; 7982 7983 val = rack_probertt_lower_within * rack_time_between_probertt; 7984 val /= 100; 7985 if ((rack->in_probe_rtt == 0) && 7986 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7987 rack_enter_probertt(rack, us_cts); 7988 } 7989 } 7990 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7991 } 7992 } 7993 } 7994 7995 static int 7996 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7997 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7998 { 7999 uint32_t us_rtt; 8000 int32_t i, all; 8001 uint32_t t, len_acked; 8002 8003 if ((rsm->r_flags & RACK_ACKED) || 8004 (rsm->r_flags & RACK_WAS_ACKED)) 8005 /* Already done */ 8006 return (0); 8007 if (rsm->r_no_rtt_allowed) { 8008 /* Not allowed */ 8009 return (0); 8010 } 8011 if (ack_type == CUM_ACKED) { 8012 if (SEQ_GT(th_ack, rsm->r_end)) { 8013 len_acked = rsm->r_end - rsm->r_start; 8014 all = 1; 8015 } else { 8016 len_acked = th_ack - rsm->r_start; 8017 all = 0; 8018 } 8019 } else { 8020 len_acked = rsm->r_end - rsm->r_start; 8021 all = 0; 8022 } 8023 if (rsm->r_rtr_cnt == 1) { 8024 8025 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8026 if ((int)t <= 0) 8027 t = 1; 8028 if (!tp->t_rttlow || tp->t_rttlow > t) 8029 tp->t_rttlow = t; 8030 if (!rack->r_ctl.rc_rack_min_rtt || 8031 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8032 rack->r_ctl.rc_rack_min_rtt = t; 8033 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8034 rack->r_ctl.rc_rack_min_rtt = 1; 8035 } 8036 } 8037 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 8038 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8039 else 8040 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8041 if (us_rtt == 0) 8042 us_rtt = 1; 8043 if (CC_ALGO(tp)->rttsample != NULL) { 8044 /* Kick the RTT to the CC */ 8045 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 8046 } 8047 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8048 if (ack_type == SACKED) { 8049 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8050 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8051 } else { 8052 /* 8053 * We need to setup what our confidence 8054 * is in this ack. 
8055 * 8056 * If the rsm was app limited and it is 8057 * less than a mss in length (the end 8058 * of the send) then we have a gap. If we 8059 * were app limited but say we were sending 8060 * multiple MSS's then we are more confident 8061 * int it. 8062 * 8063 * When we are not app-limited then we see if 8064 * the rsm is being included in the current 8065 * measurement, we tell this by the app_limited_needs_set 8066 * flag. 8067 * 8068 * Note that being cwnd blocked is not applimited 8069 * as well as the pacing delay between packets which 8070 * are sending only 1 or 2 MSS's also will show up 8071 * in the RTT. We probably need to examine this algorithm 8072 * a bit more and enhance it to account for the delay 8073 * between rsm's. We could do that by saving off the 8074 * pacing delay of each rsm (in an rsm) and then 8075 * factoring that in somehow though for now I am 8076 * not sure how :) 8077 */ 8078 int calc_conf = 0; 8079 8080 if (rsm->r_flags & RACK_APP_LIMITED) { 8081 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8082 calc_conf = 0; 8083 else 8084 calc_conf = 1; 8085 } else if (rack->app_limited_needs_set == 0) { 8086 calc_conf = 1; 8087 } else { 8088 calc_conf = 0; 8089 } 8090 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8091 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8092 calc_conf, rsm, rsm->r_rtr_cnt); 8093 } 8094 if ((rsm->r_flags & RACK_TLP) && 8095 (!IN_FASTRECOVERY(tp->t_flags))) { 8096 /* Segment was a TLP and our retrans matched */ 8097 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8098 rack->r_ctl.rc_rsm_start = tp->snd_max; 8099 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 8100 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 8101 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 8102 } 8103 } 8104 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8105 /* New more recent rack_tmit_time */ 8106 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8107 rack->rc_rack_rtt = t; 8108 } 8109 return (1); 8110 } 8111 /* 8112 * We clear the soft/rxtshift since we got an ack. 8113 * There is no assurance we will call the commit() function 8114 * so we need to clear these to avoid incorrect handling. 8115 */ 8116 tp->t_rxtshift = 0; 8117 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8118 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8119 tp->t_softerror = 0; 8120 if (to && (to->to_flags & TOF_TS) && 8121 (ack_type == CUM_ACKED) && 8122 (to->to_tsecr) && 8123 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8124 /* 8125 * Now which timestamp does it match? In this block the ACK 8126 * must be coming from a previous transmission. 8127 */ 8128 for (i = 0; i < rsm->r_rtr_cnt; i++) { 8129 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 8130 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8131 if ((int)t <= 0) 8132 t = 1; 8133 if (CC_ALGO(tp)->rttsample != NULL) { 8134 /* 8135 * Kick the RTT to the CC, here 8136 * we lie a bit in that we know the 8137 * retransmission is correct even though 8138 * we retransmitted. This is because 8139 * we match the timestamps. 
8140 */ 8141 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 8142 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 8143 else 8144 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 8145 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 8146 } 8147 if ((i + 1) < rsm->r_rtr_cnt) { 8148 /* 8149 * The peer ack'd from our previous 8150 * transmission. We have a spurious 8151 * retransmission and thus we dont 8152 * want to update our rack_rtt. 8153 * 8154 * Hmm should there be a CC revert here? 8155 * 8156 */ 8157 return (0); 8158 } 8159 if (!tp->t_rttlow || tp->t_rttlow > t) 8160 tp->t_rttlow = t; 8161 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8162 rack->r_ctl.rc_rack_min_rtt = t; 8163 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8164 rack->r_ctl.rc_rack_min_rtt = 1; 8165 } 8166 } 8167 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8168 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8169 /* New more recent rack_tmit_time */ 8170 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8171 rack->rc_rack_rtt = t; 8172 } 8173 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 8174 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 8175 rsm->r_rtr_cnt); 8176 return (1); 8177 } 8178 } 8179 goto ts_not_found; 8180 } else { 8181 /* 8182 * Ok its a SACK block that we retransmitted. or a windows 8183 * machine without timestamps. We can tell nothing from the 8184 * time-stamp since its not there or the time the peer last 8185 * recieved a segment that moved forward its cum-ack point. 8186 */ 8187 ts_not_found: 8188 i = rsm->r_rtr_cnt - 1; 8189 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8190 if ((int)t <= 0) 8191 t = 1; 8192 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8193 /* 8194 * We retransmitted and the ack came back in less 8195 * than the smallest rtt we have observed. We most 8196 * likely did an improper retransmit as outlined in 8197 * 6.2 Step 2 point 2 in the rack-draft so we 8198 * don't want to update our rack_rtt. We in 8199 * theory (in future) might want to think about reverting our 8200 * cwnd state but we won't for now. 8201 */ 8202 return (0); 8203 } else if (rack->r_ctl.rc_rack_min_rtt) { 8204 /* 8205 * We retransmitted it and the retransmit did the 8206 * job. 8207 */ 8208 if (!rack->r_ctl.rc_rack_min_rtt || 8209 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8210 rack->r_ctl.rc_rack_min_rtt = t; 8211 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8212 rack->r_ctl.rc_rack_min_rtt = 1; 8213 } 8214 } 8215 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 8216 /* New more recent rack_tmit_time */ 8217 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 8218 rack->rc_rack_rtt = t; 8219 } 8220 return (1); 8221 } 8222 } 8223 return (0); 8224 } 8225 8226 /* 8227 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
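 *
 * In outline the walk below does the following (sketch only; the
 * struct and helper names here are invented and unused):
 */

struct example_sent_seg {
	struct example_sent_seg *prev;	/* next-earlier send in time order */
	int sacked;
	int sack_passed;
};

static inline void
example_mark_sack_passed(struct example_sent_seg *newly_sacked)
{
	struct example_sent_seg *s;

	for (s = newly_sacked->prev; s != NULL; s = s->prev) {
		if (s->sacked)
			continue;	/* should not be on the list anyway */
		if (s->sack_passed)
			break;		/* older entries were marked earlier */
		s->sack_passed = 1;
	}
}

/*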
8228 */ 8229 static void 8230 rack_log_sack_passed(struct tcpcb *tp, 8231 struct tcp_rack *rack, struct rack_sendmap *rsm) 8232 { 8233 struct rack_sendmap *nrsm; 8234 8235 nrsm = rsm; 8236 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 8237 rack_head, r_tnext) { 8238 if (nrsm == rsm) { 8239 /* Skip orginal segment he is acked */ 8240 continue; 8241 } 8242 if (nrsm->r_flags & RACK_ACKED) { 8243 /* 8244 * Skip ack'd segments, though we 8245 * should not see these, since tmap 8246 * should not have ack'd segments. 8247 */ 8248 continue; 8249 } 8250 if (nrsm->r_flags & RACK_SACK_PASSED) { 8251 /* 8252 * We found one that is already marked 8253 * passed, we have been here before and 8254 * so all others below this are marked. 8255 */ 8256 break; 8257 } 8258 nrsm->r_flags |= RACK_SACK_PASSED; 8259 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8260 } 8261 } 8262 8263 static void 8264 rack_need_set_test(struct tcpcb *tp, 8265 struct tcp_rack *rack, 8266 struct rack_sendmap *rsm, 8267 tcp_seq th_ack, 8268 int line, 8269 int use_which) 8270 { 8271 8272 if ((tp->t_flags & TF_GPUTINPROG) && 8273 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8274 /* 8275 * We were app limited, and this ack 8276 * butts up or goes beyond the point where we want 8277 * to start our next measurement. We need 8278 * to record the new gput_ts as here and 8279 * possibly update the start sequence. 8280 */ 8281 uint32_t seq, ts; 8282 8283 if (rsm->r_rtr_cnt > 1) { 8284 /* 8285 * This is a retransmit, can we 8286 * really make any assessment at this 8287 * point? We are not really sure of 8288 * the timestamp, is it this or the 8289 * previous transmission? 8290 * 8291 * Lets wait for something better that 8292 * is not retransmitted. 8293 */ 8294 return; 8295 } 8296 seq = tp->gput_seq; 8297 ts = tp->gput_ts; 8298 rack->app_limited_needs_set = 0; 8299 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8300 /* Do we start at a new end? */ 8301 if ((use_which == RACK_USE_BEG) && 8302 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8303 /* 8304 * When we get an ACK that just eats 8305 * up some of the rsm, we set RACK_USE_BEG 8306 * since whats at r_start (i.e. th_ack) 8307 * is left unacked and thats where the 8308 * measurement not starts. 8309 */ 8310 tp->gput_seq = rsm->r_start; 8311 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8312 } 8313 if ((use_which == RACK_USE_END) && 8314 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8315 /* 8316 * We use the end when the cumack 8317 * is moving forward and completely 8318 * deleting the rsm passed so basically 8319 * r_end holds th_ack. 8320 * 8321 * For SACK's we also want to use the end 8322 * since this piece just got sacked and 8323 * we want to target anything after that 8324 * in our measurement. 8325 */ 8326 tp->gput_seq = rsm->r_end; 8327 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8328 } 8329 if (use_which == RACK_USE_END_OR_THACK) { 8330 /* 8331 * special case for ack moving forward, 8332 * not a sack, we need to move all the 8333 * way up to where this ack cum-ack moves 8334 * to. 8335 */ 8336 if (SEQ_GT(th_ack, rsm->r_end)) 8337 tp->gput_seq = th_ack; 8338 else 8339 tp->gput_seq = rsm->r_end; 8340 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8341 } 8342 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8343 /* 8344 * We moved beyond this guy's range, re-calculate 8345 * the new end point. 
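 *
 * For reference, re-targeting the measurement end point as done just
 * below reduces to this sketch (illustration only; example_gput_end()
 * is a made-up helper and MIN_GP_WIN is passed in as min_gp_segs):
 */

static inline uint32_t
example_gput_end(uint32_t gput_seq, int gp_filled, uint32_t init_window,
    uint32_t min_gp_segs, uint32_t maxseg, uint32_t measure_window)
{
	uint32_t span;

	if (gp_filled == 0) {
		/* No b/w estimate yet: use at least IW or a few segments. */
		span = init_window;
		if (span < (min_gp_segs * maxseg))
			span = min_gp_segs * maxseg;
	} else {
		/* Size the window from the current goodput estimate. */
		span = measure_window;
	}
	return (gput_seq + span);
}

/*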
8346 */ 8347 if (rack->rc_gp_filled == 0) { 8348 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8349 } else { 8350 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8351 } 8352 } 8353 /* 8354 * We are moving the goal post, we may be able to clear the 8355 * measure_saw_probe_rtt flag. 8356 */ 8357 if ((rack->in_probe_rtt == 0) && 8358 (rack->measure_saw_probe_rtt) && 8359 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8360 rack->measure_saw_probe_rtt = 0; 8361 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8362 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8363 if (rack->rc_gp_filled && 8364 ((tp->gput_ack - tp->gput_seq) < 8365 max(rc_init_window(rack), (MIN_GP_WIN * 8366 ctf_fixed_maxseg(tp))))) { 8367 uint32_t ideal_amount; 8368 8369 ideal_amount = rack_get_measure_window(tp, rack); 8370 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8371 /* 8372 * There is no sense of continuing this measurement 8373 * because its too small to gain us anything we 8374 * trust. Skip it and that way we can start a new 8375 * measurement quicker. 8376 */ 8377 tp->t_flags &= ~TF_GPUTINPROG; 8378 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8379 0, 0, 0, 6, __LINE__, NULL, 0); 8380 } else { 8381 /* 8382 * Reset the window further out. 8383 */ 8384 tp->gput_ack = tp->gput_seq + ideal_amount; 8385 } 8386 } 8387 } 8388 } 8389 8390 static inline int 8391 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8392 { 8393 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8394 /* Behind our TLP definition or right at */ 8395 return (0); 8396 } 8397 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8398 /* The start is beyond or right at our end of TLP definition */ 8399 return (0); 8400 } 8401 /* It has to be a sub-part of the original TLP recorded */ 8402 return (1); 8403 } 8404 8405 8406 static uint32_t 8407 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8408 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8409 { 8410 uint32_t start, end, changed = 0; 8411 struct rack_sendmap stack_map; 8412 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8413 #ifdef INVARIANTS 8414 struct rack_sendmap *insret; 8415 #endif 8416 int32_t used_ref = 1; 8417 int moved = 0; 8418 8419 start = sack->start; 8420 end = sack->end; 8421 rsm = *prsm; 8422 memset(&fe, 0, sizeof(fe)); 8423 do_rest_ofb: 8424 if ((rsm == NULL) || 8425 (SEQ_LT(end, rsm->r_start)) || 8426 (SEQ_GEQ(start, rsm->r_end)) || 8427 (SEQ_LT(start, rsm->r_start))) { 8428 /* 8429 * We are not in the right spot, 8430 * find the correct spot in the tree. 8431 */ 8432 used_ref = 0; 8433 fe.r_start = start; 8434 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8435 moved++; 8436 } 8437 if (rsm == NULL) { 8438 /* TSNH */ 8439 goto out; 8440 } 8441 /* Ok we have an ACK for some piece of this rsm */ 8442 if (rsm->r_start != start) { 8443 if ((rsm->r_flags & RACK_ACKED) == 0) { 8444 /* 8445 * Before any splitting or hookery is 8446 * done is it a TLP of interest i.e. rxt? 
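 *
 * The TLP bookkeeping below keeps one recorded [start, end) block and
 * simply widens it to cover each newly SACKed piece.  As a sketch
 * (illustration only, example_expand_tlp_window() is not part of this
 * stack; the casts mirror the SEQ_LT/SEQ_GT macros):
 */

static inline void
example_expand_tlp_window(uint32_t *win_start, uint32_t *win_end,
    uint32_t r_start, uint32_t r_end)
{

	if ((int32_t)(r_start - *win_start) < 0)
		*win_start = r_start;	/* grow on the left */
	if ((int32_t)(r_end - *win_end) > 0)
		*win_end = r_end;	/* grow on the right */
}

/*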
8447 */ 8448 if ((rsm->r_flags & RACK_TLP) && 8449 (rsm->r_rtr_cnt > 1)) { 8450 /* 8451 * We are splitting a rxt TLP, check 8452 * if we need to save off the start/end 8453 */ 8454 if (rack->rc_last_tlp_acked_set && 8455 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8456 /* 8457 * We already turned this on since we are inside 8458 * the previous one was a partially sack now we 8459 * are getting another one (maybe all of it). 8460 * 8461 */ 8462 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8463 /* 8464 * Lets make sure we have all of it though. 8465 */ 8466 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8467 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8468 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8469 rack->r_ctl.last_tlp_acked_end); 8470 } 8471 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8472 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8473 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8474 rack->r_ctl.last_tlp_acked_end); 8475 } 8476 } else { 8477 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8478 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8479 rack->rc_last_tlp_past_cumack = 0; 8480 rack->rc_last_tlp_acked_set = 1; 8481 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8482 } 8483 } 8484 /** 8485 * Need to split this in two pieces the before and after, 8486 * the before remains in the map, the after must be 8487 * added. In other words we have: 8488 * rsm |--------------| 8489 * sackblk |-------> 8490 * rsm will become 8491 * rsm |---| 8492 * and nrsm will be the sacked piece 8493 * nrsm |----------| 8494 * 8495 * But before we start down that path lets 8496 * see if the sack spans over on top of 8497 * the next guy and it is already sacked. 8498 * 8499 */ 8500 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8501 if (next && (next->r_flags & RACK_ACKED) && 8502 SEQ_GEQ(end, next->r_start)) { 8503 /** 8504 * So the next one is already acked, and 8505 * we can thus by hookery use our stack_map 8506 * to reflect the piece being sacked and 8507 * then adjust the two tree entries moving 8508 * the start and ends around. So we start like: 8509 * rsm |------------| (not-acked) 8510 * next |-----------| (acked) 8511 * sackblk |--------> 8512 * We want to end like so: 8513 * rsm |------| (not-acked) 8514 * next |-----------------| (acked) 8515 * nrsm |-----| 8516 * Where nrsm is a temporary stack piece we 8517 * use to update all the gizmos. 
8518 */ 8519 /* Copy up our fudge block */ 8520 nrsm = &stack_map; 8521 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8522 /* Now adjust our tree blocks */ 8523 rsm->r_end = start; 8524 next->r_start = start; 8525 /* Now we must adjust back where next->m is */ 8526 rack_setup_offset_for_rsm(rsm, next); 8527 8528 /* We don't need to adjust rsm, it did not change */ 8529 /* Clear out the dup ack count of the remainder */ 8530 rsm->r_dupack = 0; 8531 rsm->r_just_ret = 0; 8532 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8533 /* Now lets make sure our fudge block is right */ 8534 nrsm->r_start = start; 8535 /* Now lets update all the stats and such */ 8536 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8537 if (rack->app_limited_needs_set) 8538 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8539 changed += (nrsm->r_end - nrsm->r_start); 8540 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8541 if (nrsm->r_flags & RACK_SACK_PASSED) { 8542 counter_u64_add(rack_reorder_seen, 1); 8543 rack->r_ctl.rc_reorder_ts = cts; 8544 } 8545 /* 8546 * Now we want to go up from rsm (the 8547 * one left un-acked) to the next one 8548 * in the tmap. We do this so when 8549 * we walk backwards we include marking 8550 * sack-passed on rsm (The one passed in 8551 * is skipped since it is generally called 8552 * on something sacked before removing it 8553 * from the tmap). 8554 */ 8555 if (rsm->r_in_tmap) { 8556 nrsm = TAILQ_NEXT(rsm, r_tnext); 8557 /* 8558 * Now that we have the next 8559 * one walk backwards from there. 8560 */ 8561 if (nrsm && nrsm->r_in_tmap) 8562 rack_log_sack_passed(tp, rack, nrsm); 8563 } 8564 /* Now are we done? */ 8565 if (SEQ_LT(end, next->r_end) || 8566 (end == next->r_end)) { 8567 /* Done with block */ 8568 goto out; 8569 } 8570 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8571 counter_u64_add(rack_sack_used_next_merge, 1); 8572 /* Postion for the next block */ 8573 start = next->r_end; 8574 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8575 if (rsm == NULL) 8576 goto out; 8577 } else { 8578 /** 8579 * We can't use any hookery here, so we 8580 * need to split the map. We enter like 8581 * so: 8582 * rsm |--------| 8583 * sackblk |-----> 8584 * We will add the new block nrsm and 8585 * that will be the new portion, and then 8586 * fall through after reseting rsm. So we 8587 * split and look like this: 8588 * rsm |----| 8589 * sackblk |-----> 8590 * nrsm |---| 8591 * We then fall through reseting 8592 * rsm to nrsm, so the next block 8593 * picks it up. 8594 */ 8595 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8596 if (nrsm == NULL) { 8597 /* 8598 * failed XXXrrs what can we do but loose the sack 8599 * info? 
8600 */ 8601 goto out; 8602 } 8603 counter_u64_add(rack_sack_splits, 1); 8604 rack_clone_rsm(rack, nrsm, rsm, start); 8605 rsm->r_just_ret = 0; 8606 #ifndef INVARIANTS 8607 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8608 #else 8609 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8610 if (insret != NULL) { 8611 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8612 nrsm, insret, rack, rsm); 8613 } 8614 #endif 8615 if (rsm->r_in_tmap) { 8616 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8617 nrsm->r_in_tmap = 1; 8618 } 8619 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8620 rsm->r_flags &= (~RACK_HAS_FIN); 8621 /* Position us to point to the new nrsm that starts the sack blk */ 8622 rsm = nrsm; 8623 } 8624 } else { 8625 /* Already sacked this piece */ 8626 counter_u64_add(rack_sack_skipped_acked, 1); 8627 moved++; 8628 if (end == rsm->r_end) { 8629 /* Done with block */ 8630 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8631 goto out; 8632 } else if (SEQ_LT(end, rsm->r_end)) { 8633 /* A partial sack to a already sacked block */ 8634 moved++; 8635 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8636 goto out; 8637 } else { 8638 /* 8639 * The end goes beyond this guy 8640 * repostion the start to the 8641 * next block. 8642 */ 8643 start = rsm->r_end; 8644 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8645 if (rsm == NULL) 8646 goto out; 8647 } 8648 } 8649 } 8650 if (SEQ_GEQ(end, rsm->r_end)) { 8651 /** 8652 * The end of this block is either beyond this guy or right 8653 * at this guy. I.e.: 8654 * rsm --- |-----| 8655 * end |-----| 8656 * <or> 8657 * end |---------| 8658 */ 8659 if ((rsm->r_flags & RACK_ACKED) == 0) { 8660 /* 8661 * Is it a TLP of interest? 8662 */ 8663 if ((rsm->r_flags & RACK_TLP) && 8664 (rsm->r_rtr_cnt > 1)) { 8665 /* 8666 * We are splitting a rxt TLP, check 8667 * if we need to save off the start/end 8668 */ 8669 if (rack->rc_last_tlp_acked_set && 8670 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8671 /* 8672 * We already turned this on since we are inside 8673 * the previous one was a partially sack now we 8674 * are getting another one (maybe all of it). 8675 */ 8676 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8677 /* 8678 * Lets make sure we have all of it though. 8679 */ 8680 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8681 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8682 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8683 rack->r_ctl.last_tlp_acked_end); 8684 } 8685 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8686 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8687 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8688 rack->r_ctl.last_tlp_acked_end); 8689 } 8690 } else { 8691 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8692 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8693 rack->rc_last_tlp_past_cumack = 0; 8694 rack->rc_last_tlp_acked_set = 1; 8695 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8696 } 8697 } 8698 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8699 changed += (rsm->r_end - rsm->r_start); 8700 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8701 if (rsm->r_in_tmap) /* should be true */ 8702 rack_log_sack_passed(tp, rack, rsm); 8703 /* Is Reordering occuring? 
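 *
 * Sketch of the check below (illustration only, not called anywhere):
 * a segment that earlier had later data SACKed past it (SACK_PASSED)
 * and is now itself acknowledged was reordered, not lost.
 */

static inline int
example_note_reorder(int sack_passed_was_set, uint32_t now_us,
    uint32_t *reorder_ts)
{

	if (sack_passed_was_set) {
		*reorder_ts = now_us;	/* remember when reordering was seen */
		return (1);
	}
	return (0);
}

/*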
*/ 8704 if (rsm->r_flags & RACK_SACK_PASSED) { 8705 rsm->r_flags &= ~RACK_SACK_PASSED; 8706 counter_u64_add(rack_reorder_seen, 1); 8707 rack->r_ctl.rc_reorder_ts = cts; 8708 } 8709 if (rack->app_limited_needs_set) 8710 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8711 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8712 rsm->r_flags |= RACK_ACKED; 8713 if (rsm->r_in_tmap) { 8714 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8715 rsm->r_in_tmap = 0; 8716 } 8717 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8718 } else { 8719 counter_u64_add(rack_sack_skipped_acked, 1); 8720 moved++; 8721 } 8722 if (end == rsm->r_end) { 8723 /* This block only - done, setup for next */ 8724 goto out; 8725 } 8726 /* 8727 * There is more not coverend by this rsm move on 8728 * to the next block in the RB tree. 8729 */ 8730 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8731 start = rsm->r_end; 8732 rsm = nrsm; 8733 if (rsm == NULL) 8734 goto out; 8735 goto do_rest_ofb; 8736 } 8737 /** 8738 * The end of this sack block is smaller than 8739 * our rsm i.e.: 8740 * rsm --- |-----| 8741 * end |--| 8742 */ 8743 if ((rsm->r_flags & RACK_ACKED) == 0) { 8744 /* 8745 * Is it a TLP of interest? 8746 */ 8747 if ((rsm->r_flags & RACK_TLP) && 8748 (rsm->r_rtr_cnt > 1)) { 8749 /* 8750 * We are splitting a rxt TLP, check 8751 * if we need to save off the start/end 8752 */ 8753 if (rack->rc_last_tlp_acked_set && 8754 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8755 /* 8756 * We already turned this on since we are inside 8757 * the previous one was a partially sack now we 8758 * are getting another one (maybe all of it). 8759 */ 8760 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8761 /* 8762 * Lets make sure we have all of it though. 8763 */ 8764 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8765 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8766 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8767 rack->r_ctl.last_tlp_acked_end); 8768 } 8769 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8770 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8771 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8772 rack->r_ctl.last_tlp_acked_end); 8773 } 8774 } else { 8775 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8776 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8777 rack->rc_last_tlp_past_cumack = 0; 8778 rack->rc_last_tlp_acked_set = 1; 8779 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8780 } 8781 } 8782 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8783 if (prev && 8784 (prev->r_flags & RACK_ACKED)) { 8785 /** 8786 * Goal, we want the right remainder of rsm to shrink 8787 * in place and span from (rsm->r_start = end) to rsm->r_end. 8788 * We want to expand prev to go all the way 8789 * to prev->r_end <- end. 8790 * so in the tree we have before: 8791 * prev |--------| (acked) 8792 * rsm |-------| (non-acked) 8793 * sackblk |-| 8794 * We churn it so we end up with 8795 * prev |----------| (acked) 8796 * rsm |-----| (non-acked) 8797 * nrsm |-| (temporary) 8798 * 8799 * Note if either prev/rsm is a TLP we don't 8800 * do this. 8801 */ 8802 nrsm = &stack_map; 8803 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8804 prev->r_end = end; 8805 rsm->r_start = end; 8806 /* Now adjust nrsm (stack copy) to be 8807 * the one that is the small 8808 * piece that was "sacked". 
8809 */ 8810 nrsm->r_end = end; 8811 rsm->r_dupack = 0; 8812 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8813 /* 8814 * Now that the rsm has had its start moved forward 8815 * lets go ahead and get its new place in the world. 8816 */ 8817 rack_setup_offset_for_rsm(prev, rsm); 8818 /* 8819 * Now nrsm is our new little piece 8820 * that is acked (which was merged 8821 * to prev). Update the rtt and changed 8822 * based on that. Also check for reordering. 8823 */ 8824 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8825 if (rack->app_limited_needs_set) 8826 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8827 changed += (nrsm->r_end - nrsm->r_start); 8828 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8829 if (nrsm->r_flags & RACK_SACK_PASSED) { 8830 counter_u64_add(rack_reorder_seen, 1); 8831 rack->r_ctl.rc_reorder_ts = cts; 8832 } 8833 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8834 rsm = prev; 8835 counter_u64_add(rack_sack_used_prev_merge, 1); 8836 } else { 8837 /** 8838 * This is the case where our previous 8839 * block is not acked either, so we must 8840 * split the block in two. 8841 */ 8842 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8843 if (nrsm == NULL) { 8844 /* failed rrs what can we do but loose the sack info? */ 8845 goto out; 8846 } 8847 if ((rsm->r_flags & RACK_TLP) && 8848 (rsm->r_rtr_cnt > 1)) { 8849 /* 8850 * We are splitting a rxt TLP, check 8851 * if we need to save off the start/end 8852 */ 8853 if (rack->rc_last_tlp_acked_set && 8854 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8855 /* 8856 * We already turned this on since this block is inside 8857 * the previous one was a partially sack now we 8858 * are getting another one (maybe all of it). 8859 */ 8860 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8861 /* 8862 * Lets make sure we have all of it though. 8863 */ 8864 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8865 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8866 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8867 rack->r_ctl.last_tlp_acked_end); 8868 } 8869 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8870 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8871 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8872 rack->r_ctl.last_tlp_acked_end); 8873 } 8874 } else { 8875 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8876 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8877 rack->rc_last_tlp_acked_set = 1; 8878 rack->rc_last_tlp_past_cumack = 0; 8879 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8880 } 8881 } 8882 /** 8883 * In this case nrsm becomes 8884 * nrsm->r_start = end; 8885 * nrsm->r_end = rsm->r_end; 8886 * which is un-acked. 8887 * <and> 8888 * rsm->r_end = nrsm->r_start; 8889 * i.e. the remaining un-acked 8890 * piece is left on the left 8891 * hand side. 
8892 * 8893 * So we start like this 8894 * rsm |----------| (not acked) 8895 * sackblk |---| 8896 * build it so we have 8897 * rsm |---| (acked) 8898 * nrsm |------| (not acked) 8899 */ 8900 counter_u64_add(rack_sack_splits, 1); 8901 rack_clone_rsm(rack, nrsm, rsm, end); 8902 rsm->r_flags &= (~RACK_HAS_FIN); 8903 rsm->r_just_ret = 0; 8904 #ifndef INVARIANTS 8905 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8906 #else 8907 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8908 if (insret != NULL) { 8909 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8910 nrsm, insret, rack, rsm); 8911 } 8912 #endif 8913 if (rsm->r_in_tmap) { 8914 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8915 nrsm->r_in_tmap = 1; 8916 } 8917 nrsm->r_dupack = 0; 8918 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8919 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8920 changed += (rsm->r_end - rsm->r_start); 8921 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8922 if (rsm->r_in_tmap) /* should be true */ 8923 rack_log_sack_passed(tp, rack, rsm); 8924 /* Is Reordering occuring? */ 8925 if (rsm->r_flags & RACK_SACK_PASSED) { 8926 rsm->r_flags &= ~RACK_SACK_PASSED; 8927 counter_u64_add(rack_reorder_seen, 1); 8928 rack->r_ctl.rc_reorder_ts = cts; 8929 } 8930 if (rack->app_limited_needs_set) 8931 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8932 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8933 rsm->r_flags |= RACK_ACKED; 8934 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8935 if (rsm->r_in_tmap) { 8936 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8937 rsm->r_in_tmap = 0; 8938 } 8939 } 8940 } else if (start != end){ 8941 /* 8942 * The block was already acked. 8943 */ 8944 counter_u64_add(rack_sack_skipped_acked, 1); 8945 moved++; 8946 } 8947 out: 8948 if (rsm && 8949 ((rsm->r_flags & RACK_TLP) == 0) && 8950 (rsm->r_flags & RACK_ACKED)) { 8951 /* 8952 * Now can we merge where we worked 8953 * with either the previous or 8954 * next block? 8955 */ 8956 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8957 while (next) { 8958 if (next->r_flags & RACK_TLP) 8959 break; 8960 if (next->r_flags & RACK_ACKED) { 8961 /* yep this and next can be merged */ 8962 rsm = rack_merge_rsm(rack, rsm, next); 8963 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8964 } else 8965 break; 8966 } 8967 /* Now what about the previous? */ 8968 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8969 while (prev) { 8970 if (prev->r_flags & RACK_TLP) 8971 break; 8972 if (prev->r_flags & RACK_ACKED) { 8973 /* yep the previous and this can be merged */ 8974 rsm = rack_merge_rsm(rack, prev, rsm); 8975 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8976 } else 8977 break; 8978 } 8979 } 8980 if (used_ref == 0) { 8981 counter_u64_add(rack_sack_proc_all, 1); 8982 } else { 8983 counter_u64_add(rack_sack_proc_short, 1); 8984 } 8985 /* Save off the next one for quick reference. */ 8986 if (rsm) 8987 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8988 else 8989 nrsm = NULL; 8990 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8991 /* Pass back the moved. 
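 *
 * The merging done at the out: label above collapses runs of adjacent,
 * fully acked entries (never across a TLP-marked one).  On a plain
 * array the same idea looks like this sketch (names invented, unused):
 */

struct example_acked_blk {
	uint32_t start;		/* inclusive */
	uint32_t end;		/* exclusive */
	int acked;
	int is_tlp;
};

static inline int
example_coalesce_acked(struct example_acked_blk *blk, int nblks)
{
	int i, out;

	for (out = 0, i = 0; i < nblks; i++) {
		if (out > 0 &&
		    blk[out - 1].acked && blk[i].acked &&
		    (blk[out - 1].is_tlp == 0) && (blk[i].is_tlp == 0) &&
		    (blk[out - 1].end == blk[i].start)) {
			/* Extend the previous acked run and drop this one. */
			blk[out - 1].end = blk[i].end;
			continue;
		}
		blk[out++] = blk[i];
	}
	return (out);		/* new number of entries */
}

/*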
*/ 8992 *moved_two = moved; 8993 return (changed); 8994 } 8995 8996 static void inline 8997 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8998 { 8999 struct rack_sendmap *tmap; 9000 9001 tmap = NULL; 9002 while (rsm && (rsm->r_flags & RACK_ACKED)) { 9003 /* Its no longer sacked, mark it so */ 9004 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9005 #ifdef INVARIANTS 9006 if (rsm->r_in_tmap) { 9007 panic("rack:%p rsm:%p flags:0x%x in tmap?", 9008 rack, rsm, rsm->r_flags); 9009 } 9010 #endif 9011 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 9012 /* Rebuild it into our tmap */ 9013 if (tmap == NULL) { 9014 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9015 tmap = rsm; 9016 } else { 9017 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 9018 tmap = rsm; 9019 } 9020 tmap->r_in_tmap = 1; 9021 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9022 } 9023 /* 9024 * Now lets possibly clear the sack filter so we start 9025 * recognizing sacks that cover this area. 9026 */ 9027 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 9028 9029 } 9030 9031 static void 9032 rack_do_decay(struct tcp_rack *rack) 9033 { 9034 struct timeval res; 9035 9036 #define timersub(tvp, uvp, vvp) \ 9037 do { \ 9038 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 9039 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 9040 if ((vvp)->tv_usec < 0) { \ 9041 (vvp)->tv_sec--; \ 9042 (vvp)->tv_usec += 1000000; \ 9043 } \ 9044 } while (0) 9045 9046 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 9047 #undef timersub 9048 9049 rack->r_ctl.input_pkt++; 9050 if ((rack->rc_in_persist) || 9051 (res.tv_sec >= 1) || 9052 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 9053 /* 9054 * Check for decay of non-SAD, 9055 * we want all SAD detection metrics to 9056 * decay 1/4 per second (or more) passed. 9057 */ 9058 #ifdef NETFLIX_EXP_DETECTION 9059 uint32_t pkt_delta; 9060 9061 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 9062 #endif 9063 /* Update our saved tracking values */ 9064 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 9065 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 9066 /* Now do we escape without decay? */ 9067 #ifdef NETFLIX_EXP_DETECTION 9068 if (rack->rc_in_persist || 9069 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 9070 (pkt_delta < tcp_sad_low_pps)){ 9071 /* 9072 * We don't decay idle connections 9073 * or ones that have a low input pps. 9074 */ 9075 return; 9076 } 9077 /* Decay the counters */ 9078 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 9079 tcp_sad_decay_val); 9080 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 9081 tcp_sad_decay_val); 9082 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 9083 tcp_sad_decay_val); 9084 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 9085 tcp_sad_decay_val); 9086 #endif 9087 } 9088 } 9089 9090 static void 9091 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 9092 { 9093 struct rack_sendmap *rsm; 9094 #ifdef INVARIANTS 9095 struct rack_sendmap *rm; 9096 #endif 9097 9098 /* 9099 * The ACK point is advancing to th_ack, we must drop off 9100 * the packets in the rack log and calculate any eligble 9101 * RTT's. 
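 *
 * In outline, advancing the cum-ack over the send map means freeing
 * every entry that ends at or before th_ack and trimming the head of
 * the first one that straddles it.  A sketch over a sorted array
 * (invented names, serial-number compares like the SEQ_* macros):
 */

struct example_map_ent {
	uint32_t r_start;
	uint32_t r_end;
	int	 valid;
};

static inline void
example_advance_cumack(struct example_map_ent *map, int nents, uint32_t th_ack)
{
	int i;

	for (i = 0; i < nents; i++) {
		if (map[i].valid == 0)
			continue;
		if ((int32_t)(th_ack - map[i].r_end) >= 0) {
			/* Entirely acknowledged: drop it from the map. */
			map[i].valid = 0;
			continue;
		}
		if ((int32_t)(th_ack - map[i].r_start) > 0) {
			/* Partially acknowledged: trim only the head. */
			map[i].r_start = th_ack;
		}
		break;	/* map is sorted, nothing later is covered */
	}
}

/*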
9102 */ 9103 rack->r_wanted_output = 1; 9104 9105 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 9106 if ((rack->rc_last_tlp_acked_set == 1)&& 9107 (rack->rc_last_tlp_past_cumack == 1) && 9108 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 9109 /* 9110 * We have reached the point where our last rack 9111 * tlp retransmit sequence is ahead of the cum-ack. 9112 * This can only happen when the cum-ack moves all 9113 * the way around (its been a full 2^^31+1 bytes 9114 * or more since we sent a retransmitted TLP). Lets 9115 * turn off the valid flag since its not really valid. 9116 * 9117 * Note since sack's also turn on this event we have 9118 * a complication, we have to wait to age it out until 9119 * the cum-ack is by the TLP before checking which is 9120 * what the next else clause does. 9121 */ 9122 rack_log_dsack_event(rack, 9, __LINE__, 9123 rack->r_ctl.last_tlp_acked_start, 9124 rack->r_ctl.last_tlp_acked_end); 9125 rack->rc_last_tlp_acked_set = 0; 9126 rack->rc_last_tlp_past_cumack = 0; 9127 } else if ((rack->rc_last_tlp_acked_set == 1) && 9128 (rack->rc_last_tlp_past_cumack == 0) && 9129 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 9130 /* 9131 * It is safe to start aging TLP's out. 9132 */ 9133 rack->rc_last_tlp_past_cumack = 1; 9134 } 9135 /* We do the same for the tlp send seq as well */ 9136 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9137 (rack->rc_last_sent_tlp_past_cumack == 1) && 9138 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 9139 rack_log_dsack_event(rack, 9, __LINE__, 9140 rack->r_ctl.last_sent_tlp_seq, 9141 (rack->r_ctl.last_sent_tlp_seq + 9142 rack->r_ctl.last_sent_tlp_len)); 9143 rack->rc_last_sent_tlp_seq_valid = 0; 9144 rack->rc_last_sent_tlp_past_cumack = 0; 9145 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9146 (rack->rc_last_sent_tlp_past_cumack == 0) && 9147 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 9148 /* 9149 * It is safe to start aging TLP's send. 9150 */ 9151 rack->rc_last_sent_tlp_past_cumack = 1; 9152 } 9153 more: 9154 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9155 if (rsm == NULL) { 9156 if ((th_ack - 1) == tp->iss) { 9157 /* 9158 * For the SYN incoming case we will not 9159 * have called tcp_output for the sending of 9160 * the SYN, so there will be no map. All 9161 * other cases should probably be a panic. 9162 */ 9163 return; 9164 } 9165 if (tp->t_flags & TF_SENTFIN) { 9166 /* if we sent a FIN we often will not have map */ 9167 return; 9168 } 9169 #ifdef INVARIANTS 9170 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 9171 tp, 9172 tp->t_state, th_ack, rack, 9173 tp->snd_una, tp->snd_max, tp->snd_nxt); 9174 #endif 9175 return; 9176 } 9177 if (SEQ_LT(th_ack, rsm->r_start)) { 9178 /* Huh map is missing this */ 9179 #ifdef INVARIANTS 9180 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 9181 rsm->r_start, 9182 th_ack, tp->t_state, rack->r_state); 9183 #endif 9184 return; 9185 } 9186 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 9187 9188 /* Now was it a retransmitted TLP? */ 9189 if ((rsm->r_flags & RACK_TLP) && 9190 (rsm->r_rtr_cnt > 1)) { 9191 /* 9192 * Yes, this rsm was a TLP and retransmitted, remember that 9193 * since if a DSACK comes back on this we don't want 9194 * to think of it as a reordered segment. This may 9195 * get updated again with possibly even other TLPs 9196 * in flight, but thats ok. 
Only when we don't send 9197 * a retransmitted TLP for 1/2 the sequences space 9198 * will it get turned off (above). 9199 */ 9200 if (rack->rc_last_tlp_acked_set && 9201 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9202 /* 9203 * We already turned this on since the end matches, 9204 * the previous one was a partially ack now we 9205 * are getting another one (maybe all of it). 9206 */ 9207 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9208 /* 9209 * Lets make sure we have all of it though. 9210 */ 9211 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9212 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9213 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9214 rack->r_ctl.last_tlp_acked_end); 9215 } 9216 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9217 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9218 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9219 rack->r_ctl.last_tlp_acked_end); 9220 } 9221 } else { 9222 rack->rc_last_tlp_past_cumack = 1; 9223 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9224 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9225 rack->rc_last_tlp_acked_set = 1; 9226 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9227 } 9228 } 9229 /* Now do we consume the whole thing? */ 9230 if (SEQ_GEQ(th_ack, rsm->r_end)) { 9231 /* Its all consumed. */ 9232 uint32_t left; 9233 uint8_t newly_acked; 9234 9235 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 9236 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 9237 rsm->r_rtr_bytes = 0; 9238 /* Record the time of highest cumack sent */ 9239 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9240 #ifndef INVARIANTS 9241 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9242 #else 9243 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9244 if (rm != rsm) { 9245 panic("removing head in rack:%p rsm:%p rm:%p", 9246 rack, rsm, rm); 9247 } 9248 #endif 9249 if (rsm->r_in_tmap) { 9250 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9251 rsm->r_in_tmap = 0; 9252 } 9253 newly_acked = 1; 9254 if (rsm->r_flags & RACK_ACKED) { 9255 /* 9256 * It was acked on the scoreboard -- remove 9257 * it from total 9258 */ 9259 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9260 newly_acked = 0; 9261 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9262 /* 9263 * There are segments ACKED on the 9264 * scoreboard further up. We are seeing 9265 * reordering. 9266 */ 9267 rsm->r_flags &= ~RACK_SACK_PASSED; 9268 counter_u64_add(rack_reorder_seen, 1); 9269 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9270 rsm->r_flags |= RACK_ACKED; 9271 rack->r_ctl.rc_reorder_ts = cts; 9272 if (rack->r_ent_rec_ns) { 9273 /* 9274 * We have sent no more, and we saw an sack 9275 * then ack arrive. 9276 */ 9277 rack->r_might_revert = 1; 9278 } 9279 } 9280 if ((rsm->r_flags & RACK_TO_REXT) && 9281 (tp->t_flags & TF_RCVD_TSTMP) && 9282 (to->to_flags & TOF_TS) && 9283 (to->to_tsecr != 0) && 9284 (tp->t_flags & TF_PREVVALID)) { 9285 /* 9286 * We can use the timestamp to see 9287 * if this retransmission was from the 9288 * first transmit. If so we made a mistake. 
9289 */ 9290 tp->t_flags &= ~TF_PREVVALID; 9291 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9292 /* The first transmit is what this ack is for */ 9293 rack_cong_signal(tp, CC_RTO_ERR, th_ack); 9294 } 9295 } 9296 left = th_ack - rsm->r_end; 9297 if (rack->app_limited_needs_set && newly_acked) 9298 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9299 /* Free back to zone */ 9300 rack_free(rack, rsm); 9301 if (left) { 9302 goto more; 9303 } 9304 /* Check for reneging */ 9305 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9306 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9307 /* 9308 * The peer has moved snd_una up to 9309 * the edge of this send, i.e. one 9310 * that it had previously acked. The only 9311 * way that can be true if the peer threw 9312 * away data (space issues) that it had 9313 * previously sacked (else it would have 9314 * given us snd_una up to (rsm->r_end). 9315 * We need to undo the acked markings here. 9316 * 9317 * Note we have to look to make sure th_ack is 9318 * our rsm->r_start in case we get an old ack 9319 * where th_ack is behind snd_una. 9320 */ 9321 rack_peer_reneges(rack, rsm, th_ack); 9322 } 9323 return; 9324 } 9325 if (rsm->r_flags & RACK_ACKED) { 9326 /* 9327 * It was acked on the scoreboard -- remove it from 9328 * total for the part being cum-acked. 9329 */ 9330 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9331 } 9332 /* 9333 * Clear the dup ack count for 9334 * the piece that remains. 9335 */ 9336 rsm->r_dupack = 0; 9337 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9338 if (rsm->r_rtr_bytes) { 9339 /* 9340 * It was retransmitted adjust the 9341 * sack holes for what was acked. 9342 */ 9343 int ack_am; 9344 9345 ack_am = (th_ack - rsm->r_start); 9346 if (ack_am >= rsm->r_rtr_bytes) { 9347 rack->r_ctl.rc_holes_rxt -= ack_am; 9348 rsm->r_rtr_bytes -= ack_am; 9349 } 9350 } 9351 /* 9352 * Update where the piece starts and record 9353 * the time of send of highest cumack sent. 9354 */ 9355 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9356 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9357 /* Now we need to move our offset forward too */ 9358 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9359 /* Fix up the orig_m_len and possibly the mbuf offset */ 9360 rack_adjust_orig_mlen(rsm); 9361 } 9362 rsm->soff += (th_ack - rsm->r_start); 9363 rsm->r_start = th_ack; 9364 /* Now do we need to move the mbuf fwd too? */ 9365 if (rsm->m) { 9366 while (rsm->soff >= rsm->m->m_len) { 9367 rsm->soff -= rsm->m->m_len; 9368 rsm->m = rsm->m->m_next; 9369 KASSERT((rsm->m != NULL), 9370 (" nrsm:%p hit at soff:%u null m", 9371 rsm, rsm->soff)); 9372 } 9373 rsm->orig_m_len = rsm->m->m_len; 9374 } 9375 if (rack->app_limited_needs_set) 9376 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9377 } 9378 9379 static void 9380 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9381 { 9382 struct rack_sendmap *rsm; 9383 int sack_pass_fnd = 0; 9384 9385 if (rack->r_might_revert) { 9386 /* 9387 * Ok we have reordering, have not sent anything, we 9388 * might want to revert the congestion state if nothing 9389 * further has SACK_PASSED on it. Lets check. 9390 * 9391 * We also get here when we have DSACKs come in for 9392 * all the data that we FR'd. Note that a rxt or tlp 9393 * timer clears this from happening. 
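 *
 * Sketch of the revert below (illustration only, names invented): if
 * no remaining entry carries SACK_PASSED, the recovery was triggered
 * by reordering and the congestion state saved at entry is restored.
 */

static inline int
example_maybe_revert(int any_sack_passed, uint32_t *cwnd, uint32_t *ssthresh,
    uint32_t cwnd_at_entry, uint32_t ssthresh_at_entry)
{

	if (any_sack_passed)
		return (0);	/* a real loss signal is still present */
	*cwnd = cwnd_at_entry;
	*ssthresh = ssthresh_at_entry;
	return (1);		/* caller also leaves recovery */
}

/*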
9394 */ 9395 9396 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9397 if (rsm->r_flags & RACK_SACK_PASSED) { 9398 sack_pass_fnd = 1; 9399 break; 9400 } 9401 } 9402 if (sack_pass_fnd == 0) { 9403 /* 9404 * We went into recovery 9405 * incorrectly due to reordering! 9406 */ 9407 int orig_cwnd; 9408 9409 rack->r_ent_rec_ns = 0; 9410 orig_cwnd = tp->snd_cwnd; 9411 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec; 9412 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9413 tp->snd_recover = tp->snd_una; 9414 rack_log_to_prr(rack, 14, orig_cwnd); 9415 EXIT_RECOVERY(tp->t_flags); 9416 } 9417 rack->r_might_revert = 0; 9418 } 9419 } 9420 9421 #ifdef NETFLIX_EXP_DETECTION 9422 static void 9423 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9424 { 9425 if ((rack->do_detection || tcp_force_detection) && 9426 tcp_sack_to_ack_thresh && 9427 tcp_sack_to_move_thresh && 9428 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9429 /* 9430 * We have thresholds set to find 9431 * possible attackers and disable sack. 9432 * Check them. 9433 */ 9434 uint64_t ackratio, moveratio, movetotal; 9435 9436 /* Log detecting */ 9437 rack_log_sad(rack, 1); 9438 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9439 ackratio *= (uint64_t)(1000); 9440 if (rack->r_ctl.ack_count) 9441 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9442 else { 9443 /* We really should not hit here */ 9444 ackratio = 1000; 9445 } 9446 if ((rack->sack_attack_disable == 0) && 9447 (ackratio > rack_highest_sack_thresh_seen)) 9448 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9449 movetotal = rack->r_ctl.sack_moved_extra; 9450 movetotal += rack->r_ctl.sack_noextra_move; 9451 moveratio = rack->r_ctl.sack_moved_extra; 9452 moveratio *= (uint64_t)1000; 9453 if (movetotal) 9454 moveratio /= movetotal; 9455 else { 9456 /* No moves, thats pretty good */ 9457 moveratio = 0; 9458 } 9459 if ((rack->sack_attack_disable == 0) && 9460 (moveratio > rack_highest_move_thresh_seen)) 9461 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9462 if (rack->sack_attack_disable == 0) { 9463 if ((ackratio > tcp_sack_to_ack_thresh) && 9464 (moveratio > tcp_sack_to_move_thresh)) { 9465 /* Disable sack processing */ 9466 rack->sack_attack_disable = 1; 9467 if (rack->r_rep_attack == 0) { 9468 rack->r_rep_attack = 1; 9469 counter_u64_add(rack_sack_attacks_detected, 1); 9470 } 9471 if (tcp_attack_on_turns_on_logging) { 9472 /* 9473 * Turn on logging, used for debugging 9474 * false positives. 
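 *
 * The disable decision made a few lines above compares two ratios,
 * both scaled by 1000.  As a sketch (illustration only, invented name;
 * the thresholds are the tcp_sack_to_ack_thresh and
 * tcp_sack_to_move_thresh controls used above):
 */

static inline int
example_sad_should_disable(uint64_t sack_count, uint64_t ack_count,
    uint64_t moved_extra, uint64_t noextra_move,
    uint64_t ack_thresh, uint64_t move_thresh)
{
	uint64_t ackratio, moveratio, movetotal;

	ackratio = sack_count * 1000;
	ackratio /= (ack_count ? ack_count : 1);

	movetotal = moved_extra + noextra_move;
	moveratio = moved_extra * 1000;
	moveratio /= (movetotal ? movetotal : 1);

	return ((ackratio > ack_thresh) && (moveratio > move_thresh));
}

/*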
9475 */ 9476 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9477 } 9478 /* Clamp the cwnd at flight size */ 9479 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9480 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9481 rack_log_sad(rack, 2); 9482 } 9483 } else { 9484 /* We are sack-disabled check for false positives */ 9485 if ((ackratio <= tcp_restoral_thresh) || 9486 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9487 rack->sack_attack_disable = 0; 9488 rack_log_sad(rack, 3); 9489 /* Restart counting */ 9490 rack->r_ctl.sack_count = 0; 9491 rack->r_ctl.sack_moved_extra = 0; 9492 rack->r_ctl.sack_noextra_move = 1; 9493 rack->r_ctl.ack_count = max(1, 9494 (bytes_this_ack / segsiz)); 9495 9496 if (rack->r_rep_reverse == 0) { 9497 rack->r_rep_reverse = 1; 9498 counter_u64_add(rack_sack_attacks_reversed, 1); 9499 } 9500 /* Restore the cwnd */ 9501 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9502 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9503 } 9504 } 9505 } 9506 } 9507 #endif 9508 9509 static int 9510 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9511 { 9512 9513 uint32_t am, l_end; 9514 int was_tlp = 0; 9515 9516 if (SEQ_GT(end, start)) 9517 am = end - start; 9518 else 9519 am = 0; 9520 if ((rack->rc_last_tlp_acked_set ) && 9521 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9522 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9523 /* 9524 * The DSACK is because of a TLP which we don't 9525 * do anything with the reordering window over since 9526 * it was not reordering that caused the DSACK but 9527 * our previous retransmit TLP. 9528 */ 9529 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9530 was_tlp = 1; 9531 goto skip_dsack_round; 9532 } 9533 if (rack->rc_last_sent_tlp_seq_valid) { 9534 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9535 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9536 (SEQ_LEQ(end, l_end))) { 9537 /* 9538 * This dsack is from the last sent TLP, ignore it 9539 * for reordering purposes. 9540 */ 9541 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9542 was_tlp = 1; 9543 goto skip_dsack_round; 9544 } 9545 } 9546 if (rack->rc_dsack_round_seen == 0) { 9547 rack->rc_dsack_round_seen = 1; 9548 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9549 rack->r_ctl.num_dsack++; 9550 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9551 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9552 } 9553 skip_dsack_round: 9554 /* 9555 * We keep track of how many DSACK blocks we get 9556 * after a recovery incident. 9557 */ 9558 rack->r_ctl.dsack_byte_cnt += am; 9559 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9560 rack->r_ctl.retran_during_recovery && 9561 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9562 /* 9563 * False recovery most likely culprit is reordering. If 9564 * nothing else is missing we need to revert. 
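 *
 * Sketch of the trigger below (illustration only, invented name): once
 * the DSACK'd byte count reaches what was retransmitted during the
 * last recovery, and recovery is already over, the retransmissions
 * are presumed spurious and a revert is attempted.
 */

static inline int
example_dsack_says_false_recovery(int in_fastrecovery, uint32_t dsack_bytes,
    uint32_t retran_during_recovery)
{

	if (in_fastrecovery)
		return (0);
	if (retran_during_recovery == 0)
		return (0);
	return (dsack_bytes >= retran_during_recovery);
}

/*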
9565 */ 9566 rack->r_might_revert = 1; 9567 rack_handle_might_revert(rack->rc_tp, rack); 9568 rack->r_might_revert = 0; 9569 rack->r_ctl.retran_during_recovery = 0; 9570 rack->r_ctl.dsack_byte_cnt = 0; 9571 } 9572 return (was_tlp); 9573 } 9574 9575 static void 9576 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9577 { 9578 /* Deal with changed and PRR here (in recovery only) */ 9579 uint32_t pipe, snd_una; 9580 9581 rack->r_ctl.rc_prr_delivered += changed; 9582 9583 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9584 /* 9585 * It is all outstanding, we are application limited 9586 * and thus we don't need more room to send anything. 9587 * Note we use tp->snd_una here and not th_ack because 9588 * the data as yet not been cut from the sb. 9589 */ 9590 rack->r_ctl.rc_prr_sndcnt = 0; 9591 return; 9592 } 9593 /* Compute prr_sndcnt */ 9594 if (SEQ_GT(tp->snd_una, th_ack)) { 9595 snd_una = tp->snd_una; 9596 } else { 9597 snd_una = th_ack; 9598 } 9599 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 9600 if (pipe > tp->snd_ssthresh) { 9601 long sndcnt; 9602 9603 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9604 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9605 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9606 else { 9607 rack->r_ctl.rc_prr_sndcnt = 0; 9608 rack_log_to_prr(rack, 9, 0); 9609 sndcnt = 0; 9610 } 9611 sndcnt++; 9612 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9613 sndcnt -= rack->r_ctl.rc_prr_out; 9614 else 9615 sndcnt = 0; 9616 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9617 rack_log_to_prr(rack, 10, 0); 9618 } else { 9619 uint32_t limit; 9620 9621 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9622 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9623 else 9624 limit = 0; 9625 if (changed > limit) 9626 limit = changed; 9627 limit += ctf_fixed_maxseg(tp); 9628 if (tp->snd_ssthresh > pipe) { 9629 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9630 rack_log_to_prr(rack, 11, 0); 9631 } else { 9632 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9633 rack_log_to_prr(rack, 12, 0); 9634 } 9635 } 9636 } 9637 9638 static void 9639 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9640 { 9641 uint32_t changed; 9642 struct tcp_rack *rack; 9643 struct rack_sendmap *rsm; 9644 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9645 register uint32_t th_ack; 9646 int32_t i, j, k, num_sack_blks = 0; 9647 uint32_t cts, acked, ack_point; 9648 int loop_start = 0, moved_two = 0; 9649 uint32_t tsused; 9650 9651 9652 INP_WLOCK_ASSERT(tp->t_inpcb); 9653 if (tcp_get_flags(th) & TH_RST) { 9654 /* We don't log resets */ 9655 return; 9656 } 9657 rack = (struct tcp_rack *)tp->t_fb_ptr; 9658 cts = tcp_get_usecs(NULL); 9659 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9660 changed = 0; 9661 th_ack = th->th_ack; 9662 if (rack->sack_attack_disable == 0) 9663 rack_do_decay(rack); 9664 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9665 /* 9666 * You only get credit for 9667 * MSS and greater (and you get extra 9668 * credit for larger cum-ack moves). 9669 */ 9670 int ac; 9671 9672 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9673 rack->r_ctl.ack_count += ac; 9674 counter_u64_add(rack_ack_total, ac); 9675 } 9676 if (rack->r_ctl.ack_count > 0xfff00000) { 9677 /* 9678 * reduce the number to keep us under 9679 * a uint32_t. 
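 *
 * For reference, the PRR accounting done by rack_update_prr() above
 * follows RFC 6937 and reduces to roughly the sketch below
 * (illustration only; example_prr_sndcnt() is an invented name and
 * recover_fs is the flight size recorded at recovery entry):
 */

static inline uint32_t
example_prr_sndcnt(uint32_t snd_max, uint32_t snd_una, uint32_t sacked,
    uint32_t holes_rxt, uint32_t ssthresh, uint32_t prr_delivered,
    uint32_t prr_out, uint32_t recover_fs, uint32_t changed, uint32_t maxseg)
{
	uint32_t pipe, limit;
	int64_t sndcnt;

	pipe = ((snd_max - snd_una) - sacked) + holes_rxt;
	if (pipe > ssthresh) {
		/* Proportional reduction while pipe is still above ssthresh. */
		if (recover_fs == 0)
			return (0);
		sndcnt = (int64_t)prr_delivered * ssthresh;
		sndcnt /= recover_fs;
		sndcnt++;			/* round up, as above */
		sndcnt -= prr_out;
		if (sndcnt < 0)
			sndcnt = 0;
		return ((uint32_t)sndcnt);
	}
	/* Slow-start style growth back toward ssthresh. */
	limit = (prr_delivered > prr_out) ? (prr_delivered - prr_out) : 0;
	if (changed > limit)
		limit = changed;
	limit += maxseg;
	if (ssthresh <= pipe)
		return (0);
	return (((ssthresh - pipe) < limit) ? (ssthresh - pipe) : limit);
}

/*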
9680 */ 9681 rack->r_ctl.ack_count /= 2; 9682 rack->r_ctl.sack_count /= 2; 9683 } 9684 if (SEQ_GT(th_ack, tp->snd_una)) { 9685 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9686 tp->t_acktime = ticks; 9687 } 9688 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9689 changed = th_ack - rsm->r_start; 9690 if (changed) { 9691 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9692 } 9693 if ((to->to_flags & TOF_SACK) == 0) { 9694 /* We are done nothing left and no sack. */ 9695 rack_handle_might_revert(tp, rack); 9696 /* 9697 * For cases where we struck a dup-ack 9698 * with no SACK, add to the changes so 9699 * PRR will work right. 9700 */ 9701 if (dup_ack_struck && (changed == 0)) { 9702 changed += ctf_fixed_maxseg(rack->rc_tp); 9703 } 9704 goto out; 9705 } 9706 /* Sack block processing */ 9707 if (SEQ_GT(th_ack, tp->snd_una)) 9708 ack_point = th_ack; 9709 else 9710 ack_point = tp->snd_una; 9711 for (i = 0; i < to->to_nsacks; i++) { 9712 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9713 &sack, sizeof(sack)); 9714 sack.start = ntohl(sack.start); 9715 sack.end = ntohl(sack.end); 9716 if (SEQ_GT(sack.end, sack.start) && 9717 SEQ_GT(sack.start, ack_point) && 9718 SEQ_LT(sack.start, tp->snd_max) && 9719 SEQ_GT(sack.end, ack_point) && 9720 SEQ_LEQ(sack.end, tp->snd_max)) { 9721 sack_blocks[num_sack_blks] = sack; 9722 num_sack_blks++; 9723 } else if (SEQ_LEQ(sack.start, th_ack) && 9724 SEQ_LEQ(sack.end, th_ack)) { 9725 int was_tlp; 9726 9727 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9728 /* 9729 * Its a D-SACK block. 9730 */ 9731 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9732 } 9733 } 9734 if (rack->rc_dsack_round_seen) { 9735 /* Is the dsack roound over? */ 9736 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9737 /* Yes it is */ 9738 rack->rc_dsack_round_seen = 0; 9739 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9740 } 9741 } 9742 /* 9743 * Sort the SACK blocks so we can update the rack scoreboard with 9744 * just one pass. 9745 */ 9746 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9747 num_sack_blks, th->th_ack); 9748 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9749 if (num_sack_blks == 0) { 9750 /* Nothing to sack (DSACKs?) */ 9751 goto out_with_totals; 9752 } 9753 if (num_sack_blks < 2) { 9754 /* Only one, we don't need to sort */ 9755 goto do_sack_work; 9756 } 9757 /* Sort the sacks */ 9758 for (i = 0; i < num_sack_blks; i++) { 9759 for (j = i + 1; j < num_sack_blks; j++) { 9760 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9761 sack = sack_blocks[i]; 9762 sack_blocks[i] = sack_blocks[j]; 9763 sack_blocks[j] = sack; 9764 } 9765 } 9766 } 9767 /* 9768 * Now are any of the sack block ends the same (yes some 9769 * implementations send these)? 9770 */ 9771 again: 9772 if (num_sack_blks == 0) 9773 goto out_with_totals; 9774 if (num_sack_blks > 1) { 9775 for (i = 0; i < num_sack_blks; i++) { 9776 for (j = i + 1; j < num_sack_blks; j++) { 9777 if (sack_blocks[i].end == sack_blocks[j].end) { 9778 /* 9779 * Ok these two have the same end we 9780 * want the smallest end and then 9781 * throw away the larger and start 9782 * again. 
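 *
 * In outline, the pass above and below first bubble-sorts the blocks
 * by their end sequence and then collapses blocks that share an end,
 * keeping the widest one.  Sketch (invented names, serial-number
 * compares like the SEQ_* macros):
 */

struct example_sack_blk {
	uint32_t start;
	uint32_t end;
};

static inline int
example_sort_and_dedup(struct example_sack_blk *blk, int n)
{
	struct example_sack_blk tmp;
	int i, j, k;

	for (i = 0; i < n; i++) {
		for (j = i + 1; j < n; j++) {
			if ((int32_t)(blk[i].end - blk[j].end) > 0) {
				tmp = blk[i];
				blk[i] = blk[j];
				blk[j] = tmp;
			}
		}
	}
	for (i = 0; i < (n - 1); i++) {
		if (blk[i].end != blk[i + 1].end)
			continue;
		/* Same end: keep the smaller (earlier) start ... */
		if ((int32_t)(blk[i + 1].start - blk[i].start) < 0)
			blk[i].start = blk[i + 1].start;
		/* ... and squeeze the duplicate out of the array. */
		for (k = i + 1; k < (n - 1); k++)
			blk[k] = blk[k + 1];
		n--;
		i--;	/* re-check this slot against its new neighbour */
	}
	return (n);
}

/*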
9783 */ 9784 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9785 /* 9786 * The second block covers 9787 * more area use that 9788 */ 9789 sack_blocks[i].start = sack_blocks[j].start; 9790 } 9791 /* 9792 * Now collapse out the dup-sack and 9793 * lower the count 9794 */ 9795 for (k = (j + 1); k < num_sack_blks; k++) { 9796 sack_blocks[j].start = sack_blocks[k].start; 9797 sack_blocks[j].end = sack_blocks[k].end; 9798 j++; 9799 } 9800 num_sack_blks--; 9801 goto again; 9802 } 9803 } 9804 } 9805 } 9806 do_sack_work: 9807 /* 9808 * First lets look to see if 9809 * we have retransmitted and 9810 * can use the transmit next? 9811 */ 9812 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9813 if (rsm && 9814 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9815 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9816 /* 9817 * We probably did the FR and the next 9818 * SACK in continues as we would expect. 9819 */ 9820 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9821 if (acked) { 9822 rack->r_wanted_output = 1; 9823 changed += acked; 9824 } 9825 if (num_sack_blks == 1) { 9826 /* 9827 * This is what we would expect from 9828 * a normal implementation to happen 9829 * after we have retransmitted the FR, 9830 * i.e the sack-filter pushes down 9831 * to 1 block and the next to be retransmitted 9832 * is the sequence in the sack block (has more 9833 * are acked). Count this as ACK'd data to boost 9834 * up the chances of recovering any false positives. 9835 */ 9836 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9837 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9838 counter_u64_add(rack_express_sack, 1); 9839 if (rack->r_ctl.ack_count > 0xfff00000) { 9840 /* 9841 * reduce the number to keep us under 9842 * a uint32_t. 9843 */ 9844 rack->r_ctl.ack_count /= 2; 9845 rack->r_ctl.sack_count /= 2; 9846 } 9847 goto out_with_totals; 9848 } else { 9849 /* 9850 * Start the loop through the 9851 * rest of blocks, past the first block. 9852 */ 9853 moved_two = 0; 9854 loop_start = 1; 9855 } 9856 } 9857 /* Its a sack of some sort */ 9858 rack->r_ctl.sack_count++; 9859 if (rack->r_ctl.sack_count > 0xfff00000) { 9860 /* 9861 * reduce the number to keep us under 9862 * a uint32_t. 9863 */ 9864 rack->r_ctl.ack_count /= 2; 9865 rack->r_ctl.sack_count /= 2; 9866 } 9867 counter_u64_add(rack_sack_total, 1); 9868 if (rack->sack_attack_disable) { 9869 /* An attacker disablement is in place */ 9870 if (num_sack_blks > 1) { 9871 rack->r_ctl.sack_count += (num_sack_blks - 1); 9872 rack->r_ctl.sack_moved_extra++; 9873 counter_u64_add(rack_move_some, 1); 9874 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9875 rack->r_ctl.sack_moved_extra /= 2; 9876 rack->r_ctl.sack_noextra_move /= 2; 9877 } 9878 } 9879 goto out; 9880 } 9881 rsm = rack->r_ctl.rc_sacklast; 9882 for (i = loop_start; i < num_sack_blks; i++) { 9883 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9884 if (acked) { 9885 rack->r_wanted_output = 1; 9886 changed += acked; 9887 } 9888 if (moved_two) { 9889 /* 9890 * If we did not get a SACK for at least a MSS and 9891 * had to move at all, or if we moved more than our 9892 * threshold, it counts against the "extra" move. 9893 */ 9894 rack->r_ctl.sack_moved_extra += moved_two; 9895 counter_u64_add(rack_move_some, 1); 9896 } else { 9897 /* 9898 * else we did not have to move 9899 * any more than we would expect. 
9900 */ 9901 rack->r_ctl.sack_noextra_move++; 9902 counter_u64_add(rack_move_none, 1); 9903 } 9904 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9905 /* 9906 * If the SACK was not a full MSS then 9907 * we add to sack_count the number of 9908 * MSS's (or possibly more than 9909 * a MSS if its a TSO send) we had to skip by. 9910 */ 9911 rack->r_ctl.sack_count += moved_two; 9912 counter_u64_add(rack_sack_total, moved_two); 9913 } 9914 /* 9915 * Now we need to setup for the next 9916 * round. First we make sure we won't 9917 * exceed the size of our uint32_t on 9918 * the various counts, and then clear out 9919 * moved_two. 9920 */ 9921 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9922 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9923 rack->r_ctl.sack_moved_extra /= 2; 9924 rack->r_ctl.sack_noextra_move /= 2; 9925 } 9926 if (rack->r_ctl.sack_count > 0xfff00000) { 9927 rack->r_ctl.ack_count /= 2; 9928 rack->r_ctl.sack_count /= 2; 9929 } 9930 moved_two = 0; 9931 } 9932 out_with_totals: 9933 if (num_sack_blks > 1) { 9934 /* 9935 * You get an extra stroke if 9936 * you have more than one sack-blk, this 9937 * could be where we are skipping forward 9938 * and the sack-filter is still working, or 9939 * it could be an attacker constantly 9940 * moving us. 9941 */ 9942 rack->r_ctl.sack_moved_extra++; 9943 counter_u64_add(rack_move_some, 1); 9944 } 9945 out: 9946 #ifdef NETFLIX_EXP_DETECTION 9947 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9948 #endif 9949 if (changed) { 9950 /* Something changed cancel the rack timer */ 9951 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9952 } 9953 tsused = tcp_get_usecs(NULL); 9954 rsm = tcp_rack_output(tp, rack, tsused); 9955 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9956 rsm) { 9957 /* Enter recovery */ 9958 rack->r_ctl.rc_rsm_start = rsm->r_start; 9959 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 9960 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 9961 entered_recovery = 1; 9962 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 9963 /* 9964 * When we enter recovery we need to assure we send 9965 * one packet. 9966 */ 9967 if (rack->rack_no_prr == 0) { 9968 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9969 rack_log_to_prr(rack, 8, 0); 9970 } 9971 rack->r_timer_override = 1; 9972 rack->r_early = 0; 9973 rack->r_ctl.rc_agg_early = 0; 9974 } else if (IN_FASTRECOVERY(tp->t_flags) && 9975 rsm && 9976 (rack->r_rr_config == 3)) { 9977 /* 9978 * Assure we can output and we get no 9979 * remembered pace time except the retransmit. 9980 */ 9981 rack->r_timer_override = 1; 9982 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9983 rack->r_ctl.rc_resend = rsm; 9984 } 9985 if (IN_FASTRECOVERY(tp->t_flags) && 9986 (rack->rack_no_prr == 0) && 9987 (entered_recovery == 0)) { 9988 rack_update_prr(tp, rack, changed, th_ack); 9989 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9990 ((tcp_in_hpts(rack->rc_inp) == 0) && 9991 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9992 /* 9993 * If you are pacing output you don't want 9994 * to override. 
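             * In other words, the timer override below is only applied
             * when a retransmit candidate exists, at least one segment
             * of PRR send credit is available, and the connection is
             * neither queued on the hpts pacer nor already marked for
             * paced packet output.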
             */
            rack->r_early = 0;
            rack->r_ctl.rc_agg_early = 0;
            rack->r_timer_override = 1;
        }
    }
}

static void
rack_strike_dupack(struct tcp_rack *rack)
{
    struct rack_sendmap *rsm;

    rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
    while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
        rsm = TAILQ_NEXT(rsm, r_tnext);
    }
    if (rsm && (rsm->r_dupack < 0xff)) {
        rsm->r_dupack++;
        if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
            struct timeval tv;
            uint32_t cts;
            /*
             * Here we see if we need to retransmit. For
             * a SACK type connection if enough time has passed
             * we will get a return of the rsm. For a non-sack
             * connection we will get the rsm returned if the
             * dupack value is 3 or more.
             */
            cts = tcp_get_usecs(&tv);
            rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts);
            if (rack->r_ctl.rc_resend != NULL) {
                if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) {
                    rack_cong_signal(rack->rc_tp, CC_NDUPACK,
                        rack->rc_tp->snd_una);
                }
                rack->r_wanted_output = 1;
                rack->r_timer_override = 1;
                rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
            }
        } else {
            rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
        }
    }
}

static void
rack_check_bottom_drag(struct tcpcb *tp,
    struct tcp_rack *rack,
    struct socket *so, int32_t acked)
{
    uint32_t segsiz, minseg;

    segsiz = ctf_fixed_maxseg(tp);
    minseg = segsiz;

    if (tp->snd_max == tp->snd_una) {
        /*
         * We are doing dynamic pacing and we are way
         * under. Basically everything got acked while
         * we were still waiting on the pacer to expire.
         *
         * This means we need to boost the b/w in
         * addition to any earlier boosting of
         * the multiplier.
         */
        rack->rc_dragged_bottom = 1;
        rack_validate_multipliers_at_or_above100(rack);
        /*
         * Let's use the segment bytes acked plus
         * the lowest RTT seen as the basis to
         * form a b/w estimate. This will be off
         * due to the fact that the true estimate
         * should be around 1/2 the time of the RTT
         * but we can settle for that.
         */
        if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) &&
            acked) {
            uint64_t bw, calc_bw, rtt;

            rtt = rack->r_ctl.rack_rs.rs_us_rtt;
            if (rtt == 0) {
                /* No usec sample; is there a ms one? */
                if (rack->r_ctl.rack_rs.rs_rtt_lowest) {
                    rtt = rack->r_ctl.rack_rs.rs_rtt_lowest;
                } else {
                    goto no_measurement;
                }
            }
            bw = acked;
            calc_bw = bw * 1000000;
            calc_bw /= rtt;
            if (rack->r_ctl.last_max_bw &&
                (rack->r_ctl.last_max_bw < calc_bw)) {
                /*
                 * If we have a last calculated max bw
                 * enforce it.
                 */
                calc_bw = rack->r_ctl.last_max_bw;
            }
            /* Now plop it in */
            if (rack->rc_gp_filled == 0) {
                if (calc_bw > ONE_POINT_TWO_MEG) {
                    /*
                     * If we have no measurement
                     * don't let us set in more than
                     * 1.2Mbps. If we are still too
                     * low after pacing with this we
                     * will hopefully have a max b/w
                     * available to sanity check things.
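                     * As a rough illustrative example (made-up
                     * numbers): 14600 bytes acked against a lowest
                     * RTT of 100000 usec gives
                     * calc_bw = 14600 * 1000000 / 100000 = 146000
                     * bytes per second, which is still subject to
                     * the last_max_bw clamp above and the cap
                     * described here.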
                     */
                    calc_bw = ONE_POINT_TWO_MEG;
                }
                rack->r_ctl.rc_rtt_diff = 0;
                rack->r_ctl.gp_bw = calc_bw;
                rack->rc_gp_filled = 1;
                if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
                    rack->r_ctl.num_measurements = RACK_REQ_AVG;
                rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
            } else if (calc_bw > rack->r_ctl.gp_bw) {
                rack->r_ctl.rc_rtt_diff = 0;
                if (rack->r_ctl.num_measurements < RACK_REQ_AVG)
                    rack->r_ctl.num_measurements = RACK_REQ_AVG;
                rack->r_ctl.gp_bw = calc_bw;
                rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
            } else
                rack_increase_bw_mul(rack, -1, 0, 0, 1);
            if ((rack->gp_ready == 0) &&
                (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) {
                /* We have enough measurements now */
                rack->gp_ready = 1;
                rack_set_cc_pacing(rack);
                if (rack->defer_options)
                    rack_apply_deferred_options(rack);
            }
            /*
             * For acks over 1 MSS we do an extra boost to simulate
             * where we would get 2 acks (we want 110 for the mul).
             */
            if (acked > segsiz)
                rack_increase_bw_mul(rack, -1, 0, 0, 1);
        } else {
            /*
             * Zero RTT possibly? Settle for just an ordinary increase.
             */
no_measurement:
            rack_increase_bw_mul(rack, -1, 0, 0, 1);
        }
    } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) &&
        (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)),
        minseg)) &&
        (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) &&
        (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) &&
        (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <=
        (segsiz * rack_req_segs))) {
        /*
         * We are doing dynamic GP pacing and only 1 MSS or less
         * of data is left outstanding. We are still pacing away,
         * and there is more data that could be sent. This means
         * we are inserting delayed-ack time into our measurements
         * because we are pacing too slowly.
         */
        rack_validate_multipliers_at_or_above100(rack);
        rack->rc_dragged_bottom = 1;
        rack_increase_bw_mul(rack, -1, 0, 0, 1);
    }
}

static void
rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount)
{
    /*
     * The fast output path is enabled and we
     * have moved the cumack forward. Let's see if
     * we can expand forward the fast path length by
     * that amount. What we would ideally like to
     * do is increase the number of bytes in the
     * fast path block (left_to_send) by the
     * acked amount. However we have to gate that
     * by two factors:
     * 1) The amount outstanding and the rwnd of the peer
     *    (i.e. we don't want to exceed the rwnd of the peer).
     *    <and>
     * 2) The amount of data left in the socket buffer (i.e.
     *    we can't send beyond what is in the buffer).
     *
     * Note that this does not take into account any increase
     * in the cwnd. We will only extend the fast path by
     * what was acked.
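     * As an illustrative example (made-up numbers): if 3000 bytes
     * were just cumulatively acked, the socket buffer holds 20000
     * bytes beyond what is outstanding, and the peer's window
     * leaves 12000 bytes of headroom, then left_to_send may grow
     * by the 3000 bytes only if the new total stays at or below
     * min(20000, 12000).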
     */
    uint32_t new_total, gating_val;

    new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
    gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
        (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
    if (new_total <= gating_val) {
        /* We can increase left_to_send by the acked amount */
        counter_u64_add(rack_extended_rfo, 1);
        rack->r_ctl.fsb.left_to_send = new_total;
        KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
            ("rack:%p left_to_send:%u sbavail:%u out:%u",
            rack, rack->r_ctl.fsb.left_to_send,
            sbavail(&rack->rc_inp->inp_socket->so_snd),
            (tp->snd_max - tp->snd_una)));
    }
}

static void
rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
{
    /*
     * Here any sendmap entry that points to the
     * beginning mbuf must be adjusted to the correct
     * offset. This must be called with:
     * 1) The socket buffer locked.
     * 2) snd_una adjusted to its new position.
     *
     * Note that (2) implies rack_ack_received has also
     * been called.
     *
     * We grab the first mbuf in the socket buffer and
     * then go through the front of the sendmap, recalculating
     * the stored offset for any sendmap entry that has
     * that mbuf. We must use the sb functions to do this
     * since it's possible an add was done as well as
     * the subtraction we may have just completed. This should
     * not be a penalty though, since we just referenced the sb
     * to go in and trim off the mbufs that we freed (of course
     * there will be a penalty for the sendmap references though).
     */
    struct mbuf *m;
    struct rack_sendmap *rsm;

    SOCKBUF_LOCK_ASSERT(sb);
    m = sb->sb_mb;
    rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
    if ((rsm == NULL) || (m == NULL)) {
        /* Nothing outstanding */
        return;
    }
    while (rsm->m && (rsm->m == m)) {
        /* One to adjust */
#ifdef INVARIANTS
        struct mbuf *tm;
        uint32_t soff;

        tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
        if (rsm->orig_m_len != m->m_len) {
            rack_adjust_orig_mlen(rsm);
        }
        if (rsm->soff != soff) {
            /*
             * This is not a fatal error, we anticipate it
             * might happen (the else code), so we count it here
             * so that under INVARIANTS we can see that it really
             * does happen.
             */
            counter_u64_add(rack_adjust_map_bw, 1);
        }
        rsm->m = tm;
        rsm->soff = soff;
        if (tm)
            rsm->orig_m_len = rsm->m->m_len;
        else
            rsm->orig_m_len = 0;
#else
        rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
        if (rsm->m)
            rsm->orig_m_len = rsm->m->m_len;
        else
            rsm->orig_m_len = 0;
#endif
        rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
            rsm);
        if (rsm == NULL)
            break;
    }
}

/*
 * A return value of 1 means we do not need to call rack_process_data();
 * a return value of 0 means rack_process_data() can be called.
 * For ret_val: if it is 0 the TCP is locked, if it is non-zero
 * it is unlocked and probably unsafe to touch the TCB.
 */
static int
rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen,
    int32_t * ofia, int32_t thflags, int32_t *ret_val)
{
    int32_t ourfinisacked = 0;
    int32_t nsegs, acked_amount;
    int32_t acked;
    struct mbuf *mfree;
    struct tcp_rack *rack;
    int32_t under_pacing = 0;
    int32_t recovery = 0;

    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (SEQ_GT(th->th_ack, tp->snd_max)) {
        __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
            &rack->r_ctl.challenge_ack_ts,
            &rack->r_ctl.challenge_ack_cnt);
        rack->r_wanted_output = 1;
        return (1);
    }
    if (rack->gp_ready &&
        (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
        under_pacing = 1;
    }
    if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
        int in_rec, dup_ack_struck = 0;

        in_rec = IN_FASTRECOVERY(tp->t_flags);
        if (rack->rc_in_persist) {
            tp->t_rxtshift = 0;
            RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
                rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
        }
        if ((th->th_ack == tp->snd_una) &&
            (tiwin == tp->snd_wnd) &&
            ((to->to_flags & TOF_SACK) == 0)) {
            rack_strike_dupack(rack);
            dup_ack_struck = 1;
        }
        rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck);
    }
    if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
        /*
         * Old ack, behind (or duplicate to) the last one rcv'd.
         * Note: we mark reordering as occurring if the ack is
         * strictly behind snd_una and we have not closed our
         * receive window.
         */
        if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
            counter_u64_add(rack_reorder_seen, 1);
            rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
        }
        return (0);
    }
    /*
     * If we reach this point, ACK is not a duplicate, i.e., it ACKs
     * something we sent.
     */
    if (tp->t_flags & TF_NEEDSYN) {
        /*
         * T/TCP: Connection was half-synchronized, and our SYN has
         * been ACK'd (so connection is now fully synchronized). Go
         * to non-starred state, increment snd_una for ACK of SYN,
         * and check if we can do window scaling.
         */
        tp->t_flags &= ~TF_NEEDSYN;
        tp->snd_una++;
        /* Do window scaling? */
        if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
            (TF_RCVD_SCALE | TF_REQ_SCALE)) {
            tp->rcv_scale = tp->request_r_scale;
            /* Send window already scaled. */
        }
    }
    nsegs = max(1, m->m_pkthdr.lro_nsegs);
    INP_WLOCK_ASSERT(tp->t_inpcb);

    acked = BYTES_THIS_ACK(tp, th);
    if (acked) {
        /*
         * Any time we move the cum-ack forward, clear the
         * keep-alive tied probe-not-answered flag. The
         * persist code clears its own on entry.
         */
        rack->probe_not_answered = 0;
    }
    KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
    KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
    /*
     * If we just performed our first retransmit, and the ACK arrives
     * within our recovery window, then it was a mistake to do the
     * retransmit in the first place. Recover our original cwnd and
     * ssthresh, and proceed to transmit where we left off.
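     * Note that the check below relies on signed tick arithmetic:
     * (int)(ticks - t_badrxtwin) < 0 holds only while ticks has not
     * yet passed t_badrxtwin, so the comparison remains valid across
     * tick-counter wrap.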
10378 */ 10379 if ((tp->t_flags & TF_PREVVALID) && 10380 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10381 tp->t_flags &= ~TF_PREVVALID; 10382 if (tp->t_rxtshift == 1 && 10383 (int)(ticks - tp->t_badrxtwin) < 0) 10384 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 10385 } 10386 if (acked) { 10387 /* assure we are not backed off */ 10388 tp->t_rxtshift = 0; 10389 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10390 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10391 rack->rc_tlp_in_progress = 0; 10392 rack->r_ctl.rc_tlp_cnt_out = 0; 10393 /* 10394 * If it is the RXT timer we want to 10395 * stop it, so we can restart a TLP. 10396 */ 10397 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10398 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10399 #ifdef NETFLIX_HTTP_LOGGING 10400 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10401 #endif 10402 } 10403 /* 10404 * If we have a timestamp reply, update smoothed round trip time. If 10405 * no timestamp is present but transmit timer is running and timed 10406 * sequence number was acked, update smoothed round trip time. Since 10407 * we now have an rtt measurement, cancel the timer backoff (cf., 10408 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10409 * timer. 10410 * 10411 * Some boxes send broken timestamp replies during the SYN+ACK 10412 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10413 * and blow up the retransmit timer. 10414 */ 10415 /* 10416 * If all outstanding data is acked, stop retransmit timer and 10417 * remember to restart (more output or persist). If there is more 10418 * data to be acked, restart retransmit timer, using current 10419 * (possibly backed-off) value. 10420 */ 10421 if (acked == 0) { 10422 if (ofia) 10423 *ofia = ourfinisacked; 10424 return (0); 10425 } 10426 if (IN_RECOVERY(tp->t_flags)) { 10427 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10428 (SEQ_LT(th->th_ack, tp->snd_max))) { 10429 tcp_rack_partialack(tp); 10430 } else { 10431 rack_post_recovery(tp, th->th_ack); 10432 recovery = 1; 10433 } 10434 } 10435 /* 10436 * Let the congestion control algorithm update congestion control 10437 * related information. This typically means increasing the 10438 * congestion window. 10439 */ 10440 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10441 SOCKBUF_LOCK(&so->so_snd); 10442 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10443 tp->snd_wnd -= acked_amount; 10444 mfree = sbcut_locked(&so->so_snd, acked_amount); 10445 if ((sbused(&so->so_snd) == 0) && 10446 (acked > acked_amount) && 10447 (tp->t_state >= TCPS_FIN_WAIT_1) && 10448 (tp->t_flags & TF_SENTFIN)) { 10449 /* 10450 * We must be sure our fin 10451 * was sent and acked (we can be 10452 * in FIN_WAIT_1 without having 10453 * sent the fin). 10454 */ 10455 ourfinisacked = 1; 10456 } 10457 tp->snd_una = th->th_ack; 10458 if (acked_amount && sbavail(&so->so_snd)) 10459 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10460 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10461 /* NB: sowwakeup_locked() does an implicit unlock. 
     */
    sowwakeup_locked(so);
    m_freem(mfree);
    if (SEQ_GT(tp->snd_una, tp->snd_recover))
        tp->snd_recover = tp->snd_una;

    if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
        tp->snd_nxt = tp->snd_una;
    }
    if (under_pacing &&
        (rack->use_fixed_rate == 0) &&
        (rack->in_probe_rtt == 0) &&
        rack->rc_gp_dyn_mul &&
        rack->rc_always_pace) {
        /* Check if we are dragging bottom */
        rack_check_bottom_drag(tp, rack, so, acked);
    }
    if (tp->snd_una == tp->snd_max) {
        /* Nothing left outstanding */
        tp->t_flags &= ~TF_PREVVALID;
        rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
        rack->r_ctl.retran_during_recovery = 0;
        rack->r_ctl.dsack_byte_cnt = 0;
        if (rack->r_ctl.rc_went_idle_time == 0)
            rack->r_ctl.rc_went_idle_time = 1;
        rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
        if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
            tp->t_acktime = 0;
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
        /* Set need output so persist might get set */
        rack->r_wanted_output = 1;
        sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
        if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
            (sbavail(&so->so_snd) == 0) &&
            (tp->t_flags2 & TF2_DROP_AF_DATA)) {
            /*
             * The socket was gone and the
             * peer sent data (now or in the past), time to
             * reset him.
             */
            *ret_val = 1;
            /* tcp_close will kill the inp pre-log the Reset */
            tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
            tp = tcp_close(tp);
            ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
            return (1);
        }
    }
    if (ofia)
        *ofia = ourfinisacked;
    return (0);
}

static void
rack_collapsed_window(struct tcp_rack *rack)
{
    /*
     * Now we must walk the
     * send map and split and mark the
     * entries left stranded beyond the
     * collapsed window. These entries
     * can't cause us to abort the
     * connection and are really
     * "unsent". However, if a buggy
     * client actually did keep some
     * of the data, i.e. collapsed the win,
     * refused to ack, and then later opened
     * the win and acked that data, we would
     * get into an ack war. So the simpler
     * method of just pretending we did not
     * send those segments at all won't work.
     */
    struct rack_sendmap *rsm, *nrsm, fe;
#ifdef INVARIANTS
    struct rack_sendmap *insret;
#endif
    tcp_seq max_seq;

    max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd;
    memset(&fe, 0, sizeof(fe));
    fe.r_start = max_seq;
    /* Find the first seq past or at maxseq */
    rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe);
    if (rsm == NULL) {
        /* Nothing to do, strange */
        rack->rc_has_collapsed = 0;
        return;
    }
    /*
     * Now do we need to split at
     * the collapse point?
     */
    if (SEQ_GT(max_seq, rsm->r_start)) {
        nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
        if (nrsm == NULL) {
            /*
             * We can't get a rsm, mark all?
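             * If the split allocation fails we simply fall back to
             * flagging everything from the found rsm onward, without
             * splitting at the exact collapse point.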
*/ 10557 nrsm = rsm; 10558 goto no_split; 10559 } 10560 /* Clone it */ 10561 rack_clone_rsm(rack, nrsm, rsm, max_seq); 10562 #ifndef INVARIANTS 10563 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10564 #else 10565 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10566 if (insret != NULL) { 10567 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10568 nrsm, insret, rack, rsm); 10569 } 10570 #endif 10571 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__); 10572 if (rsm->r_in_tmap) { 10573 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10574 nrsm->r_in_tmap = 1; 10575 } 10576 /* 10577 * Set in the new RSM as the 10578 * collapsed starting point 10579 */ 10580 rsm = nrsm; 10581 } 10582 no_split: 10583 counter_u64_add(rack_collapsed_win, 1); 10584 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10585 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10586 } 10587 rack->rc_has_collapsed = 1; 10588 } 10589 10590 static void 10591 rack_un_collapse_window(struct tcp_rack *rack) 10592 { 10593 struct rack_sendmap *rsm; 10594 10595 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 10596 if (rsm->r_flags & RACK_RWND_COLLAPSED) 10597 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 10598 else 10599 break; 10600 } 10601 rack->rc_has_collapsed = 0; 10602 } 10603 10604 static void 10605 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10606 int32_t tlen, int32_t tfo_syn) 10607 { 10608 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10609 if (rack->rc_dack_mode && 10610 (tlen > 500) && 10611 (rack->rc_dack_toggle == 1)) { 10612 goto no_delayed_ack; 10613 } 10614 rack_timer_cancel(tp, rack, 10615 rack->r_ctl.rc_rcvtime, __LINE__); 10616 tp->t_flags |= TF_DELACK; 10617 } else { 10618 no_delayed_ack: 10619 rack->r_wanted_output = 1; 10620 tp->t_flags |= TF_ACKNOW; 10621 if (rack->rc_dack_mode) { 10622 if (tp->t_flags & TF_DELACK) 10623 rack->rc_dack_toggle = 1; 10624 else 10625 rack->rc_dack_toggle = 0; 10626 } 10627 } 10628 } 10629 10630 static void 10631 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 10632 { 10633 /* 10634 * If fast output is in progress, lets validate that 10635 * the new window did not shrink on us and make it 10636 * so fast output should end. 10637 */ 10638 if (rack->r_fast_output) { 10639 uint32_t out; 10640 10641 /* 10642 * Calculate what we will send if left as is 10643 * and compare that to our send window. 10644 */ 10645 out = ctf_outstanding(tp); 10646 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10647 /* ok we have an issue */ 10648 if (out >= tp->snd_wnd) { 10649 /* Turn off fast output the window is met or collapsed */ 10650 rack->r_fast_output = 0; 10651 } else { 10652 /* we have some room left */ 10653 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10654 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10655 /* If not at least 1 full segment never mind */ 10656 rack->r_fast_output = 0; 10657 } 10658 } 10659 } 10660 } 10661 } 10662 10663 10664 /* 10665 * Return value of 1, the TCB is unlocked and most 10666 * likely gone, return value of 0, the TCP is still 10667 * locked. 10668 */ 10669 static int 10670 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10671 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10672 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10673 { 10674 /* 10675 * Update window information. Don't look at window if no ACK: TAC's 10676 * send garbage on first SYN. 
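 * The window is taken from the segment only when the segment is
 * "newer" by the classic test below: it starts new data
 * (snd_wl1 < th_seq), or it is the same segment but acks new data
 * (snd_wl2 < th_ack), or both match and the advertised window
 * simply grew.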
10677 */ 10678 int32_t nsegs; 10679 int32_t tfo_syn; 10680 struct tcp_rack *rack; 10681 10682 rack = (struct tcp_rack *)tp->t_fb_ptr; 10683 INP_WLOCK_ASSERT(tp->t_inpcb); 10684 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10685 if ((thflags & TH_ACK) && 10686 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10687 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10688 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10689 /* keep track of pure window updates */ 10690 if (tlen == 0 && 10691 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10692 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10693 tp->snd_wnd = tiwin; 10694 rack_validate_fo_sendwin_up(tp, rack); 10695 tp->snd_wl1 = th->th_seq; 10696 tp->snd_wl2 = th->th_ack; 10697 if (tp->snd_wnd > tp->max_sndwnd) 10698 tp->max_sndwnd = tp->snd_wnd; 10699 rack->r_wanted_output = 1; 10700 } else if (thflags & TH_ACK) { 10701 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10702 tp->snd_wnd = tiwin; 10703 rack_validate_fo_sendwin_up(tp, rack); 10704 tp->snd_wl1 = th->th_seq; 10705 tp->snd_wl2 = th->th_ack; 10706 } 10707 } 10708 if (tp->snd_wnd < ctf_outstanding(tp)) 10709 /* The peer collapsed the window */ 10710 rack_collapsed_window(rack); 10711 else if (rack->rc_has_collapsed) 10712 rack_un_collapse_window(rack); 10713 /* Was persist timer active and now we have window space? */ 10714 if ((rack->rc_in_persist != 0) && 10715 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10716 rack->r_ctl.rc_pace_min_segs))) { 10717 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10718 tp->snd_nxt = tp->snd_max; 10719 /* Make sure we output to start the timer */ 10720 rack->r_wanted_output = 1; 10721 } 10722 /* Do we enter persists? */ 10723 if ((rack->rc_in_persist == 0) && 10724 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10725 TCPS_HAVEESTABLISHED(tp->t_state) && 10726 (tp->snd_max == tp->snd_una) && 10727 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10728 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10729 /* 10730 * Here the rwnd is less than 10731 * the pacing size, we are established, 10732 * nothing is outstanding, and there is 10733 * data to send. Enter persists. 10734 */ 10735 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10736 } 10737 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10738 m_freem(m); 10739 return (0); 10740 } 10741 /* 10742 * don't process the URG bit, ignore them drag 10743 * along the up. 10744 */ 10745 tp->rcv_up = tp->rcv_nxt; 10746 INP_WLOCK_ASSERT(tp->t_inpcb); 10747 10748 /* 10749 * Process the segment text, merging it into the TCP sequencing 10750 * queue, and arranging for acknowledgment of receipt if necessary. 10751 * This process logically involves adjusting tp->rcv_wnd as data is 10752 * presented to the user (this happens in tcp_usrreq.c, case 10753 * PRU_RCVD). If a FIN has already been received on this connection 10754 * then we just ignore the text. 10755 */ 10756 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10757 IS_FASTOPEN(tp->t_flags)); 10758 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10759 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10760 tcp_seq save_start = th->th_seq; 10761 tcp_seq save_rnxt = tp->rcv_nxt; 10762 int save_tlen = tlen; 10763 10764 m_adj(m, drop_hdrlen); /* delayed header drop */ 10765 /* 10766 * Insert segment which includes th into TCP reassembly 10767 * queue with control block tp. Set thflags to whether 10768 * reassembly now includes a segment with FIN. 
This handles 10769 * the common case inline (segment is the next to be 10770 * received on an established connection, and the queue is 10771 * empty), avoiding linkage into and removal from the queue 10772 * and repetition of various conversions. Set DELACK for 10773 * segments received in order, but ack immediately when 10774 * segments are out of order (so fast retransmit can work). 10775 */ 10776 if (th->th_seq == tp->rcv_nxt && 10777 SEGQ_EMPTY(tp) && 10778 (TCPS_HAVEESTABLISHED(tp->t_state) || 10779 tfo_syn)) { 10780 #ifdef NETFLIX_SB_LIMITS 10781 u_int mcnt, appended; 10782 10783 if (so->so_rcv.sb_shlim) { 10784 mcnt = m_memcnt(m); 10785 appended = 0; 10786 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10787 CFO_NOSLEEP, NULL) == false) { 10788 counter_u64_add(tcp_sb_shlim_fails, 1); 10789 m_freem(m); 10790 return (0); 10791 } 10792 } 10793 #endif 10794 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10795 tp->rcv_nxt += tlen; 10796 if (tlen && 10797 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10798 (tp->t_fbyte_in == 0)) { 10799 tp->t_fbyte_in = ticks; 10800 if (tp->t_fbyte_in == 0) 10801 tp->t_fbyte_in = 1; 10802 if (tp->t_fbyte_out && tp->t_fbyte_in) 10803 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10804 } 10805 thflags = tcp_get_flags(th) & TH_FIN; 10806 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10807 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10808 SOCKBUF_LOCK(&so->so_rcv); 10809 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10810 m_freem(m); 10811 } else 10812 #ifdef NETFLIX_SB_LIMITS 10813 appended = 10814 #endif 10815 sbappendstream_locked(&so->so_rcv, m, 0); 10816 10817 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10818 /* NB: sorwakeup_locked() does an implicit unlock. */ 10819 sorwakeup_locked(so); 10820 #ifdef NETFLIX_SB_LIMITS 10821 if (so->so_rcv.sb_shlim && appended != mcnt) 10822 counter_fo_release(so->so_rcv.sb_shlim, 10823 mcnt - appended); 10824 #endif 10825 } else { 10826 /* 10827 * XXX: Due to the header drop above "th" is 10828 * theoretically invalid by now. Fortunately 10829 * m_adj() doesn't actually frees any mbufs when 10830 * trimming from the head. 10831 */ 10832 tcp_seq temp = save_start; 10833 10834 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10835 tp->t_flags |= TF_ACKNOW; 10836 if (tp->t_flags & TF_WAKESOR) { 10837 tp->t_flags &= ~TF_WAKESOR; 10838 /* NB: sorwakeup_locked() does an implicit unlock. */ 10839 sorwakeup_locked(so); 10840 } 10841 } 10842 if ((tp->t_flags & TF_SACK_PERMIT) && 10843 (save_tlen > 0) && 10844 TCPS_HAVEESTABLISHED(tp->t_state)) { 10845 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10846 /* 10847 * DSACK actually handled in the fastpath 10848 * above. 10849 */ 10850 RACK_OPTS_INC(tcp_sack_path_1); 10851 tcp_update_sack_list(tp, save_start, 10852 save_start + save_tlen); 10853 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10854 if ((tp->rcv_numsacks >= 1) && 10855 (tp->sackblks[0].end == save_start)) { 10856 /* 10857 * Partial overlap, recorded at todrop 10858 * above. 10859 */ 10860 RACK_OPTS_INC(tcp_sack_path_2a); 10861 tcp_update_sack_list(tp, 10862 tp->sackblks[0].start, 10863 tp->sackblks[0].end); 10864 } else { 10865 RACK_OPTS_INC(tcp_sack_path_2b); 10866 tcp_update_dsack_list(tp, save_start, 10867 save_start + save_tlen); 10868 } 10869 } else if (tlen >= save_tlen) { 10870 /* Update of sackblks. 
*/ 10871 RACK_OPTS_INC(tcp_sack_path_3); 10872 tcp_update_dsack_list(tp, save_start, 10873 save_start + save_tlen); 10874 } else if (tlen > 0) { 10875 RACK_OPTS_INC(tcp_sack_path_4); 10876 tcp_update_dsack_list(tp, save_start, 10877 save_start + tlen); 10878 } 10879 } 10880 } else { 10881 m_freem(m); 10882 thflags &= ~TH_FIN; 10883 } 10884 10885 /* 10886 * If FIN is received ACK the FIN and let the user know that the 10887 * connection is closing. 10888 */ 10889 if (thflags & TH_FIN) { 10890 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10891 /* The socket upcall is handled by socantrcvmore. */ 10892 socantrcvmore(so); 10893 /* 10894 * If connection is half-synchronized (ie NEEDSYN 10895 * flag on) then delay ACK, so it may be piggybacked 10896 * when SYN is sent. Otherwise, since we received a 10897 * FIN then no more input can be expected, send ACK 10898 * now. 10899 */ 10900 if (tp->t_flags & TF_NEEDSYN) { 10901 rack_timer_cancel(tp, rack, 10902 rack->r_ctl.rc_rcvtime, __LINE__); 10903 tp->t_flags |= TF_DELACK; 10904 } else { 10905 tp->t_flags |= TF_ACKNOW; 10906 } 10907 tp->rcv_nxt++; 10908 } 10909 switch (tp->t_state) { 10910 /* 10911 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10912 * CLOSE_WAIT state. 10913 */ 10914 case TCPS_SYN_RECEIVED: 10915 tp->t_starttime = ticks; 10916 /* FALLTHROUGH */ 10917 case TCPS_ESTABLISHED: 10918 rack_timer_cancel(tp, rack, 10919 rack->r_ctl.rc_rcvtime, __LINE__); 10920 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10921 break; 10922 10923 /* 10924 * If still in FIN_WAIT_1 STATE FIN has not been 10925 * acked so enter the CLOSING state. 10926 */ 10927 case TCPS_FIN_WAIT_1: 10928 rack_timer_cancel(tp, rack, 10929 rack->r_ctl.rc_rcvtime, __LINE__); 10930 tcp_state_change(tp, TCPS_CLOSING); 10931 break; 10932 10933 /* 10934 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10935 * starting the time-wait timer, turning off the 10936 * other standard timers. 10937 */ 10938 case TCPS_FIN_WAIT_2: 10939 rack_timer_cancel(tp, rack, 10940 rack->r_ctl.rc_rcvtime, __LINE__); 10941 tcp_twstart(tp); 10942 return (1); 10943 } 10944 } 10945 /* 10946 * Return any desired output. 10947 */ 10948 if ((tp->t_flags & TF_ACKNOW) || 10949 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10950 rack->r_wanted_output = 1; 10951 } 10952 INP_WLOCK_ASSERT(tp->t_inpcb); 10953 return (0); 10954 } 10955 10956 /* 10957 * Here nothing is really faster, its just that we 10958 * have broken out the fast-data path also just like 10959 * the fast-ack. 10960 */ 10961 static int 10962 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10963 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10964 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10965 { 10966 int32_t nsegs; 10967 int32_t newsize = 0; /* automatic sockbuf scaling */ 10968 struct tcp_rack *rack; 10969 #ifdef NETFLIX_SB_LIMITS 10970 u_int mcnt, appended; 10971 #endif 10972 #ifdef TCPDEBUG 10973 /* 10974 * The size of tcp_saveipgen must be the size of the max ip header, 10975 * now IPv6. 10976 */ 10977 u_char tcp_saveipgen[IP6_HDR_LEN]; 10978 struct tcphdr tcp_savetcp; 10979 short ostate = 0; 10980 10981 #endif 10982 /* 10983 * If last ACK falls within this segment's sequence numbers, record 10984 * the timestamp. NOTE that the test is modified according to the 10985 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
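 * The __predict_false() checks that follow reject anything that is
 * not a pure, in-sequence data segment: an unexpected sequence
 * number, pending retransmits, a changed (non-zero) window,
 * NEEDSYN/NEEDFIN, a stale timestamp, an ack that is not exactly
 * snd_una, or data that will not fit in the receive buffer all push
 * the segment back to the slow path.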
10986 */ 10987 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10988 return (0); 10989 } 10990 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10991 return (0); 10992 } 10993 if (tiwin && tiwin != tp->snd_wnd) { 10994 return (0); 10995 } 10996 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10997 return (0); 10998 } 10999 if (__predict_false((to->to_flags & TOF_TS) && 11000 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 11001 return (0); 11002 } 11003 if (__predict_false((th->th_ack != tp->snd_una))) { 11004 return (0); 11005 } 11006 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 11007 return (0); 11008 } 11009 if ((to->to_flags & TOF_TS) != 0 && 11010 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 11011 tp->ts_recent_age = tcp_ts_getticks(); 11012 tp->ts_recent = to->to_tsval; 11013 } 11014 rack = (struct tcp_rack *)tp->t_fb_ptr; 11015 /* 11016 * This is a pure, in-sequence data packet with nothing on the 11017 * reassembly queue and we have enough buffer space to take it. 11018 */ 11019 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11020 11021 #ifdef NETFLIX_SB_LIMITS 11022 if (so->so_rcv.sb_shlim) { 11023 mcnt = m_memcnt(m); 11024 appended = 0; 11025 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 11026 CFO_NOSLEEP, NULL) == false) { 11027 counter_u64_add(tcp_sb_shlim_fails, 1); 11028 m_freem(m); 11029 return (1); 11030 } 11031 } 11032 #endif 11033 /* Clean receiver SACK report if present */ 11034 if (tp->rcv_numsacks) 11035 tcp_clean_sackreport(tp); 11036 KMOD_TCPSTAT_INC(tcps_preddat); 11037 tp->rcv_nxt += tlen; 11038 if (tlen && 11039 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 11040 (tp->t_fbyte_in == 0)) { 11041 tp->t_fbyte_in = ticks; 11042 if (tp->t_fbyte_in == 0) 11043 tp->t_fbyte_in = 1; 11044 if (tp->t_fbyte_out && tp->t_fbyte_in) 11045 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 11046 } 11047 /* 11048 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 11049 */ 11050 tp->snd_wl1 = th->th_seq; 11051 /* 11052 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 11053 */ 11054 tp->rcv_up = tp->rcv_nxt; 11055 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 11056 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 11057 #ifdef TCPDEBUG 11058 if (so->so_options & SO_DEBUG) 11059 tcp_trace(TA_INPUT, ostate, tp, 11060 (void *)tcp_saveipgen, &tcp_savetcp, 0); 11061 #endif 11062 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 11063 11064 /* Add data to socket buffer. */ 11065 SOCKBUF_LOCK(&so->so_rcv); 11066 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11067 m_freem(m); 11068 } else { 11069 /* 11070 * Set new socket buffer size. Give up when limit is 11071 * reached. 11072 */ 11073 if (newsize) 11074 if (!sbreserve_locked(&so->so_rcv, 11075 newsize, so, NULL)) 11076 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 11077 m_adj(m, drop_hdrlen); /* delayed header drop */ 11078 #ifdef NETFLIX_SB_LIMITS 11079 appended = 11080 #endif 11081 sbappendstream_locked(&so->so_rcv, m, 0); 11082 ctf_calc_rwin(so, tp); 11083 } 11084 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 11085 /* NB: sorwakeup_locked() does an implicit unlock. */ 11086 sorwakeup_locked(so); 11087 #ifdef NETFLIX_SB_LIMITS 11088 if (so->so_rcv.sb_shlim && mcnt != appended) 11089 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 11090 #endif 11091 rack_handle_delayed_ack(tp, rack, tlen, 0); 11092 if (tp->snd_una == tp->snd_max) 11093 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 11094 return (1); 11095 } 11096 11097 /* 11098 * This subfunction is used to try to highly optimize the 11099 * fast path. 
We again allow window updates that are 11100 * in sequence to remain in the fast-path. We also add 11101 * in the __predict's to attempt to help the compiler. 11102 * Note that if we return a 0, then we can *not* process 11103 * it and the caller should push the packet into the 11104 * slow-path. 11105 */ 11106 static int 11107 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11108 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11109 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 11110 { 11111 int32_t acked; 11112 int32_t nsegs; 11113 #ifdef TCPDEBUG 11114 /* 11115 * The size of tcp_saveipgen must be the size of the max ip header, 11116 * now IPv6. 11117 */ 11118 u_char tcp_saveipgen[IP6_HDR_LEN]; 11119 struct tcphdr tcp_savetcp; 11120 short ostate = 0; 11121 #endif 11122 int32_t under_pacing = 0; 11123 struct tcp_rack *rack; 11124 11125 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11126 /* Old ack, behind (or duplicate to) the last one rcv'd */ 11127 return (0); 11128 } 11129 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 11130 /* Above what we have sent? */ 11131 return (0); 11132 } 11133 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 11134 /* We are retransmitting */ 11135 return (0); 11136 } 11137 if (__predict_false(tiwin == 0)) { 11138 /* zero window */ 11139 return (0); 11140 } 11141 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 11142 /* We need a SYN or a FIN, unlikely.. */ 11143 return (0); 11144 } 11145 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 11146 /* Timestamp is behind .. old ack with seq wrap? */ 11147 return (0); 11148 } 11149 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 11150 /* Still recovering */ 11151 return (0); 11152 } 11153 rack = (struct tcp_rack *)tp->t_fb_ptr; 11154 if (rack->r_ctl.rc_sacked) { 11155 /* We have sack holes on our scoreboard */ 11156 return (0); 11157 } 11158 /* Ok if we reach here, we can process a fast-ack */ 11159 if (rack->gp_ready && 11160 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11161 under_pacing = 1; 11162 } 11163 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11164 rack_log_ack(tp, to, th, 0, 0); 11165 /* Did the window get updated? */ 11166 if (tiwin != tp->snd_wnd) { 11167 tp->snd_wnd = tiwin; 11168 rack_validate_fo_sendwin_up(tp, rack); 11169 tp->snd_wl1 = th->th_seq; 11170 if (tp->snd_wnd > tp->max_sndwnd) 11171 tp->max_sndwnd = tp->snd_wnd; 11172 } 11173 /* Do we exit persists? */ 11174 if ((rack->rc_in_persist != 0) && 11175 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 11176 rack->r_ctl.rc_pace_min_segs))) { 11177 rack_exit_persist(tp, rack, cts); 11178 } 11179 /* Do we enter persists? */ 11180 if ((rack->rc_in_persist == 0) && 11181 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 11182 TCPS_HAVEESTABLISHED(tp->t_state) && 11183 (tp->snd_max == tp->snd_una) && 11184 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 11185 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 11186 /* 11187 * Here the rwnd is less than 11188 * the pacing size, we are established, 11189 * nothing is outstanding, and there is 11190 * data to send. Enter persists. 11191 */ 11192 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 11193 } 11194 /* 11195 * If last ACK falls within this segment's sequence numbers, record 11196 * the timestamp. NOTE that the test is modified according to the 11197 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
11198 */ 11199 if ((to->to_flags & TOF_TS) != 0 && 11200 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 11201 tp->ts_recent_age = tcp_ts_getticks(); 11202 tp->ts_recent = to->to_tsval; 11203 } 11204 /* 11205 * This is a pure ack for outstanding data. 11206 */ 11207 KMOD_TCPSTAT_INC(tcps_predack); 11208 11209 /* 11210 * "bad retransmit" recovery. 11211 */ 11212 if ((tp->t_flags & TF_PREVVALID) && 11213 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11214 tp->t_flags &= ~TF_PREVVALID; 11215 if (tp->t_rxtshift == 1 && 11216 (int)(ticks - tp->t_badrxtwin) < 0) 11217 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 11218 } 11219 /* 11220 * Recalculate the transmit timer / rtt. 11221 * 11222 * Some boxes send broken timestamp replies during the SYN+ACK 11223 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11224 * and blow up the retransmit timer. 11225 */ 11226 acked = BYTES_THIS_ACK(tp, th); 11227 11228 #ifdef TCP_HHOOK 11229 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 11230 hhook_run_tcp_est_in(tp, th, to); 11231 #endif 11232 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11233 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11234 if (acked) { 11235 struct mbuf *mfree; 11236 11237 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11238 SOCKBUF_LOCK(&so->so_snd); 11239 mfree = sbcut_locked(&so->so_snd, acked); 11240 tp->snd_una = th->th_ack; 11241 /* Note we want to hold the sb lock through the sendmap adjust */ 11242 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11243 /* Wake up the socket if we have room to write more */ 11244 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11245 sowwakeup_locked(so); 11246 m_freem(mfree); 11247 tp->t_rxtshift = 0; 11248 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11249 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11250 rack->rc_tlp_in_progress = 0; 11251 rack->r_ctl.rc_tlp_cnt_out = 0; 11252 /* 11253 * If it is the RXT timer we want to 11254 * stop it, so we can restart a TLP. 11255 */ 11256 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11257 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11258 #ifdef NETFLIX_HTTP_LOGGING 11259 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11260 #endif 11261 } 11262 /* 11263 * Let the congestion control algorithm update congestion control 11264 * related information. This typically means increasing the 11265 * congestion window. 11266 */ 11267 if (tp->snd_wnd < ctf_outstanding(tp)) { 11268 /* The peer collapsed the window */ 11269 rack_collapsed_window(rack); 11270 } else if (rack->rc_has_collapsed) 11271 rack_un_collapse_window(rack); 11272 11273 /* 11274 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11275 */ 11276 tp->snd_wl2 = th->th_ack; 11277 tp->t_dupacks = 0; 11278 m_freem(m); 11279 /* ND6_HINT(tp); *//* Some progress has been made. */ 11280 11281 /* 11282 * If all outstanding data are acked, stop retransmit timer, 11283 * otherwise restart timer using current (possibly backed-off) 11284 * value. If process is waiting for space, wakeup/selwakeup/signal. 11285 * If data are ready to send, let tcp_output decide between more 11286 * output or persist. 
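 * In this stack that work happens just below: once
 * snd_una == snd_max the rack timer is cancelled and the idle time
 * is stamped, and r_wanted_output is set whenever the send buffer
 * still holds data.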
11287 */ 11288 #ifdef TCPDEBUG 11289 if (so->so_options & SO_DEBUG) 11290 tcp_trace(TA_INPUT, ostate, tp, 11291 (void *)tcp_saveipgen, 11292 &tcp_savetcp, 0); 11293 #endif 11294 if (under_pacing && 11295 (rack->use_fixed_rate == 0) && 11296 (rack->in_probe_rtt == 0) && 11297 rack->rc_gp_dyn_mul && 11298 rack->rc_always_pace) { 11299 /* Check if we are dragging bottom */ 11300 rack_check_bottom_drag(tp, rack, so, acked); 11301 } 11302 if (tp->snd_una == tp->snd_max) { 11303 tp->t_flags &= ~TF_PREVVALID; 11304 rack->r_ctl.retran_during_recovery = 0; 11305 rack->r_ctl.dsack_byte_cnt = 0; 11306 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11307 if (rack->r_ctl.rc_went_idle_time == 0) 11308 rack->r_ctl.rc_went_idle_time = 1; 11309 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11310 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 11311 tp->t_acktime = 0; 11312 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11313 } 11314 if (acked && rack->r_fast_output) 11315 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11316 if (sbavail(&so->so_snd)) { 11317 rack->r_wanted_output = 1; 11318 } 11319 return (1); 11320 } 11321 11322 /* 11323 * Return value of 1, the TCB is unlocked and most 11324 * likely gone, return value of 0, the TCP is still 11325 * locked. 11326 */ 11327 static int 11328 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11329 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11330 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11331 { 11332 int32_t ret_val = 0; 11333 int32_t todrop; 11334 int32_t ourfinisacked = 0; 11335 struct tcp_rack *rack; 11336 11337 ctf_calc_rwin(so, tp); 11338 /* 11339 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11340 * SYN, drop the input. if seg contains a RST, then drop the 11341 * connection. if seg does not contain SYN, then drop it. Otherwise 11342 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11343 * tp->irs if seg contains ack then advance tp->snd_una if seg 11344 * contains an ECE and ECN support is enabled, the stream is ECN 11345 * capable. if SYN has been acked change to ESTABLISHED else 11346 * SYN_RCVD state arrange for segment to be acked (eventually) 11347 * continue processing rest of data/controls. 11348 */ 11349 if ((thflags & TH_ACK) && 11350 (SEQ_LEQ(th->th_ack, tp->iss) || 11351 SEQ_GT(th->th_ack, tp->snd_max))) { 11352 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11353 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11354 return (1); 11355 } 11356 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11357 TCP_PROBE5(connect__refused, NULL, tp, 11358 mtod(m, const char *), tp, th); 11359 tp = tcp_drop(tp, ECONNREFUSED); 11360 ctf_do_drop(m, tp); 11361 return (1); 11362 } 11363 if (thflags & TH_RST) { 11364 ctf_do_drop(m, tp); 11365 return (1); 11366 } 11367 if (!(thflags & TH_SYN)) { 11368 ctf_do_drop(m, tp); 11369 return (1); 11370 } 11371 tp->irs = th->th_seq; 11372 tcp_rcvseqinit(tp); 11373 rack = (struct tcp_rack *)tp->t_fb_ptr; 11374 if (thflags & TH_ACK) { 11375 int tfo_partial = 0; 11376 11377 KMOD_TCPSTAT_INC(tcps_connects); 11378 soisconnected(so); 11379 #ifdef MAC 11380 mac_socketpeer_set_from_mbuf(m, so); 11381 #endif 11382 /* Do window scaling on this connection? 
*/ 11383 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11384 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11385 tp->rcv_scale = tp->request_r_scale; 11386 } 11387 tp->rcv_adv += min(tp->rcv_wnd, 11388 TCP_MAXWIN << tp->rcv_scale); 11389 /* 11390 * If not all the data that was sent in the TFO SYN 11391 * has been acked, resend the remainder right away. 11392 */ 11393 if (IS_FASTOPEN(tp->t_flags) && 11394 (tp->snd_una != tp->snd_max)) { 11395 tp->snd_nxt = th->th_ack; 11396 tfo_partial = 1; 11397 } 11398 /* 11399 * If there's data, delay ACK; if there's also a FIN ACKNOW 11400 * will be turned on later. 11401 */ 11402 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11403 rack_timer_cancel(tp, rack, 11404 rack->r_ctl.rc_rcvtime, __LINE__); 11405 tp->t_flags |= TF_DELACK; 11406 } else { 11407 rack->r_wanted_output = 1; 11408 tp->t_flags |= TF_ACKNOW; 11409 rack->rc_dack_toggle = 0; 11410 } 11411 11412 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11413 11414 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11415 /* 11416 * We advance snd_una for the 11417 * fast open case. If th_ack is 11418 * acknowledging data beyond 11419 * snd_una we can't just call 11420 * ack-processing since the 11421 * data stream in our send-map 11422 * will start at snd_una + 1 (one 11423 * beyond the SYN). If its just 11424 * equal we don't need to do that 11425 * and there is no send_map. 11426 */ 11427 tp->snd_una++; 11428 } 11429 /* 11430 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11431 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11432 */ 11433 tp->t_starttime = ticks; 11434 if (tp->t_flags & TF_NEEDFIN) { 11435 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11436 tp->t_flags &= ~TF_NEEDFIN; 11437 thflags &= ~TH_SYN; 11438 } else { 11439 tcp_state_change(tp, TCPS_ESTABLISHED); 11440 TCP_PROBE5(connect__established, NULL, tp, 11441 mtod(m, const char *), tp, th); 11442 rack_cc_conn_init(tp); 11443 } 11444 } else { 11445 /* 11446 * Received initial SYN in SYN-SENT[*] state => simultaneous 11447 * open. If segment contains CC option and there is a 11448 * cached CC, apply TAO test. If it succeeds, connection is * 11449 * half-synchronized. Otherwise, do 3-way handshake: 11450 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11451 * there was no CC option, clear cached CC value. 11452 */ 11453 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 11454 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11455 } 11456 INP_WLOCK_ASSERT(tp->t_inpcb); 11457 /* 11458 * Advance th->th_seq to correspond to first data byte. If data, 11459 * trim to stay within window, dropping FIN if necessary. 11460 */ 11461 th->th_seq++; 11462 if (tlen > tp->rcv_wnd) { 11463 todrop = tlen - tp->rcv_wnd; 11464 m_adj(m, -todrop); 11465 tlen = tp->rcv_wnd; 11466 thflags &= ~TH_FIN; 11467 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11468 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11469 } 11470 tp->snd_wl1 = th->th_seq - 1; 11471 tp->rcv_up = th->th_seq; 11472 /* 11473 * Client side of transaction: already sent SYN and data. If the 11474 * remote host used T/TCP to validate the SYN, our data will be 11475 * ACK'd; if so, enter normal data segment processing in the middle 11476 * of step 5, ack processing. Otherwise, goto step 6. 
11477 */ 11478 if (thflags & TH_ACK) { 11479 /* For syn-sent we need to possibly update the rtt */ 11480 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11481 uint32_t t, mcts; 11482 11483 mcts = tcp_ts_getticks(); 11484 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11485 if (!tp->t_rttlow || tp->t_rttlow > t) 11486 tp->t_rttlow = t; 11487 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11488 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11489 tcp_rack_xmit_timer_commit(rack, tp); 11490 } 11491 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11492 return (ret_val); 11493 /* We may have changed to FIN_WAIT_1 above */ 11494 if (tp->t_state == TCPS_FIN_WAIT_1) { 11495 /* 11496 * In FIN_WAIT_1 STATE in addition to the processing 11497 * for the ESTABLISHED state if our FIN is now 11498 * acknowledged then enter FIN_WAIT_2. 11499 */ 11500 if (ourfinisacked) { 11501 /* 11502 * If we can't receive any more data, then 11503 * closing user can proceed. Starting the 11504 * timer is contrary to the specification, 11505 * but if we don't get a FIN we'll hang 11506 * forever. 11507 * 11508 * XXXjl: we should release the tp also, and 11509 * use a compressed state. 11510 */ 11511 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11512 soisdisconnected(so); 11513 tcp_timer_activate(tp, TT_2MSL, 11514 (tcp_fast_finwait2_recycle ? 11515 tcp_finwait2_timeout : 11516 TP_MAXIDLE(tp))); 11517 } 11518 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11519 } 11520 } 11521 } 11522 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11523 tiwin, thflags, nxt_pkt)); 11524 } 11525 11526 /* 11527 * Return value of 1, the TCB is unlocked and most 11528 * likely gone, return value of 0, the TCP is still 11529 * locked. 11530 */ 11531 static int 11532 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11533 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11534 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11535 { 11536 struct tcp_rack *rack; 11537 int32_t ret_val = 0; 11538 int32_t ourfinisacked = 0; 11539 11540 ctf_calc_rwin(so, tp); 11541 if ((thflags & TH_ACK) && 11542 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11543 SEQ_GT(th->th_ack, tp->snd_max))) { 11544 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11545 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11546 return (1); 11547 } 11548 rack = (struct tcp_rack *)tp->t_fb_ptr; 11549 if (IS_FASTOPEN(tp->t_flags)) { 11550 /* 11551 * When a TFO connection is in SYN_RECEIVED, the 11552 * only valid packets are the initial SYN, a 11553 * retransmit/copy of the initial SYN (possibly with 11554 * a subset of the original data), a valid ACK, a 11555 * FIN, or a RST. 
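 * Anything else is handled just below: a SYN|ACK is answered with a
 * reset, a bare SYN is silently dropped while a retransmit, TLP or
 * RACK timer is pending, and segments carrying none of ACK/FIN/RST
 * are dropped as well.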
11556 */ 11557 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11558 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11559 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11560 return (1); 11561 } else if (thflags & TH_SYN) { 11562 /* non-initial SYN is ignored */ 11563 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11564 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11565 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11566 ctf_do_drop(m, NULL); 11567 return (0); 11568 } 11569 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11570 ctf_do_drop(m, NULL); 11571 return (0); 11572 } 11573 } 11574 11575 if ((thflags & TH_RST) || 11576 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11577 return (__ctf_process_rst(m, th, so, tp, 11578 &rack->r_ctl.challenge_ack_ts, 11579 &rack->r_ctl.challenge_ack_cnt)); 11580 /* 11581 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11582 * it's less than ts_recent, drop it. 11583 */ 11584 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11585 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11586 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11587 return (ret_val); 11588 } 11589 /* 11590 * In the SYN-RECEIVED state, validate that the packet belongs to 11591 * this connection before trimming the data to fit the receive 11592 * window. Check the sequence number versus IRS since we know the 11593 * sequence numbers haven't wrapped. This is a partial fix for the 11594 * "LAND" DoS attack. 11595 */ 11596 if (SEQ_LT(th->th_seq, tp->irs)) { 11597 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11598 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11599 return (1); 11600 } 11601 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11602 &rack->r_ctl.challenge_ack_ts, 11603 &rack->r_ctl.challenge_ack_cnt)) { 11604 return (ret_val); 11605 } 11606 /* 11607 * If last ACK falls within this segment's sequence numbers, record 11608 * its timestamp. NOTE: 1) That the test incorporates suggestions 11609 * from the latest proposal of the tcplw@cray.com list (Braden 11610 * 1993/04/26). 2) That updating only on newer timestamps interferes 11611 * with our earlier PAWS tests, so this check should be solely 11612 * predicated on the sequence space of this segment. 3) That we 11613 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11614 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11615 * SEG.Len, This modified check allows us to overcome RFC1323's 11616 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11617 * p.869. In such cases, we can still calculate the RTT correctly 11618 * when RCV.NXT == Last.ACK.Sent. 11619 */ 11620 if ((to->to_flags & TOF_TS) != 0 && 11621 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11622 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11623 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11624 tp->ts_recent_age = tcp_ts_getticks(); 11625 tp->ts_recent = to->to_tsval; 11626 } 11627 tp->snd_wnd = tiwin; 11628 rack_validate_fo_sendwin_up(tp, rack); 11629 /* 11630 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11631 * is on (half-synchronized state), then queue data for later 11632 * processing; else drop segment and return. 
11633 */ 11634 if ((thflags & TH_ACK) == 0) { 11635 if (IS_FASTOPEN(tp->t_flags)) { 11636 rack_cc_conn_init(tp); 11637 } 11638 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11639 tiwin, thflags, nxt_pkt)); 11640 } 11641 KMOD_TCPSTAT_INC(tcps_connects); 11642 soisconnected(so); 11643 /* Do window scaling? */ 11644 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11645 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11646 tp->rcv_scale = tp->request_r_scale; 11647 } 11648 /* 11649 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11650 * FIN-WAIT-1 11651 */ 11652 tp->t_starttime = ticks; 11653 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11654 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11655 tp->t_tfo_pending = NULL; 11656 } 11657 if (tp->t_flags & TF_NEEDFIN) { 11658 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11659 tp->t_flags &= ~TF_NEEDFIN; 11660 } else { 11661 tcp_state_change(tp, TCPS_ESTABLISHED); 11662 TCP_PROBE5(accept__established, NULL, tp, 11663 mtod(m, const char *), tp, th); 11664 /* 11665 * TFO connections call cc_conn_init() during SYN 11666 * processing. Calling it again here for such connections 11667 * is not harmless as it would undo the snd_cwnd reduction 11668 * that occurs when a TFO SYN|ACK is retransmitted. 11669 */ 11670 if (!IS_FASTOPEN(tp->t_flags)) 11671 rack_cc_conn_init(tp); 11672 } 11673 /* 11674 * Account for the ACK of our SYN prior to 11675 * regular ACK processing below, except for 11676 * simultaneous SYN, which is handled later. 11677 */ 11678 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11679 tp->snd_una++; 11680 /* 11681 * If segment contains data or ACK, will call tcp_reass() later; if 11682 * not, do so now to pass queued data to user. 11683 */ 11684 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11685 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11686 (struct mbuf *)0); 11687 if (tp->t_flags & TF_WAKESOR) { 11688 tp->t_flags &= ~TF_WAKESOR; 11689 /* NB: sorwakeup_locked() does an implicit unlock. */ 11690 sorwakeup_locked(so); 11691 } 11692 } 11693 tp->snd_wl1 = th->th_seq - 1; 11694 /* For syn-recv we need to possibly update the rtt */ 11695 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11696 uint32_t t, mcts; 11697 11698 mcts = tcp_ts_getticks(); 11699 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11700 if (!tp->t_rttlow || tp->t_rttlow > t) 11701 tp->t_rttlow = t; 11702 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11703 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11704 tcp_rack_xmit_timer_commit(rack, tp); 11705 } 11706 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11707 return (ret_val); 11708 } 11709 if (tp->t_state == TCPS_FIN_WAIT_1) { 11710 /* We could have went to FIN_WAIT_1 (or EST) above */ 11711 /* 11712 * In FIN_WAIT_1 STATE in addition to the processing for the 11713 * ESTABLISHED state if our FIN is now acknowledged then 11714 * enter FIN_WAIT_2. 11715 */ 11716 if (ourfinisacked) { 11717 /* 11718 * If we can't receive any more data, then closing 11719 * user can proceed. Starting the timer is contrary 11720 * to the specification, but if we don't get a FIN 11721 * we'll hang forever. 11722 * 11723 * XXXjl: we should release the tp also, and use a 11724 * compressed state. 11725 */ 11726 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11727 soisdisconnected(so); 11728 tcp_timer_activate(tp, TT_2MSL, 11729 (tcp_fast_finwait2_recycle ? 
11730 tcp_finwait2_timeout : 11731 TP_MAXIDLE(tp))); 11732 } 11733 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11734 } 11735 } 11736 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11737 tiwin, thflags, nxt_pkt)); 11738 } 11739 11740 /* 11741 * Return value of 1, the TCB is unlocked and most 11742 * likely gone, return value of 0, the TCP is still 11743 * locked. 11744 */ 11745 static int 11746 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11747 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11748 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11749 { 11750 int32_t ret_val = 0; 11751 struct tcp_rack *rack; 11752 11753 /* 11754 * Header prediction: check for the two common cases of a 11755 * uni-directional data xfer. If the packet has no control flags, 11756 * is in-sequence, the window didn't change and we're not 11757 * retransmitting, it's a candidate. If the length is zero and the 11758 * ack moved forward, we're the sender side of the xfer. Just free 11759 * the data acked & wake any higher level process that was blocked 11760 * waiting for space. If the length is non-zero and the ack didn't 11761 * move, we're the receiver side. If we're getting packets in-order 11762 * (the reassembly queue is empty), add the data toc The socket 11763 * buffer and note that we need a delayed ack. Make sure that the 11764 * hidden state-flags are also off. Since we check for 11765 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11766 */ 11767 rack = (struct tcp_rack *)tp->t_fb_ptr; 11768 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11769 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11770 __predict_true(SEGQ_EMPTY(tp)) && 11771 __predict_true(th->th_seq == tp->rcv_nxt)) { 11772 if (tlen == 0) { 11773 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11774 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11775 return (0); 11776 } 11777 } else { 11778 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11779 tiwin, nxt_pkt, iptos)) { 11780 return (0); 11781 } 11782 } 11783 } 11784 ctf_calc_rwin(so, tp); 11785 11786 if ((thflags & TH_RST) || 11787 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11788 return (__ctf_process_rst(m, th, so, tp, 11789 &rack->r_ctl.challenge_ack_ts, 11790 &rack->r_ctl.challenge_ack_cnt)); 11791 11792 /* 11793 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11794 * synchronized state. 11795 */ 11796 if (thflags & TH_SYN) { 11797 ctf_challenge_ack(m, th, tp, &ret_val); 11798 return (ret_val); 11799 } 11800 /* 11801 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11802 * it's less than ts_recent, drop it. 11803 */ 11804 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11805 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11806 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11807 return (ret_val); 11808 } 11809 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11810 &rack->r_ctl.challenge_ack_ts, 11811 &rack->r_ctl.challenge_ack_cnt)) { 11812 return (ret_val); 11813 } 11814 /* 11815 * If last ACK falls within this segment's sequence numbers, record 11816 * its timestamp. NOTE: 1) That the test incorporates suggestions 11817 * from the latest proposal of the tcplw@cray.com list (Braden 11818 * 1993/04/26). 2) That updating only on newer timestamps interferes 11819 * with our earlier PAWS tests, so this check should be solely 11820 * predicated on the sequence space of this segment. 
3) That we 11821 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11822 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11823 * SEG.Len, This modified check allows us to overcome RFC1323's 11824 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11825 * p.869. In such cases, we can still calculate the RTT correctly 11826 * when RCV.NXT == Last.ACK.Sent. 11827 */ 11828 if ((to->to_flags & TOF_TS) != 0 && 11829 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11830 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11831 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11832 tp->ts_recent_age = tcp_ts_getticks(); 11833 tp->ts_recent = to->to_tsval; 11834 } 11835 /* 11836 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11837 * is on (half-synchronized state), then queue data for later 11838 * processing; else drop segment and return. 11839 */ 11840 if ((thflags & TH_ACK) == 0) { 11841 if (tp->t_flags & TF_NEEDSYN) { 11842 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11843 tiwin, thflags, nxt_pkt)); 11844 11845 } else if (tp->t_flags & TF_ACKNOW) { 11846 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11847 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11848 return (ret_val); 11849 } else { 11850 ctf_do_drop(m, NULL); 11851 return (0); 11852 } 11853 } 11854 /* 11855 * Ack processing. 11856 */ 11857 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11858 return (ret_val); 11859 } 11860 if (sbavail(&so->so_snd)) { 11861 if (ctf_progress_timeout_check(tp, true)) { 11862 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11863 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11864 return (1); 11865 } 11866 } 11867 /* State changes only happen in rack_process_data() */ 11868 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11869 tiwin, thflags, nxt_pkt)); 11870 } 11871 11872 /* 11873 * Return value of 1, the TCB is unlocked and most 11874 * likely gone, return value of 0, the TCP is still 11875 * locked. 11876 */ 11877 static int 11878 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11879 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11880 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11881 { 11882 int32_t ret_val = 0; 11883 struct tcp_rack *rack; 11884 11885 rack = (struct tcp_rack *)tp->t_fb_ptr; 11886 ctf_calc_rwin(so, tp); 11887 if ((thflags & TH_RST) || 11888 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11889 return (__ctf_process_rst(m, th, so, tp, 11890 &rack->r_ctl.challenge_ack_ts, 11891 &rack->r_ctl.challenge_ack_cnt)); 11892 /* 11893 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11894 * synchronized state. 11895 */ 11896 if (thflags & TH_SYN) { 11897 ctf_challenge_ack(m, th, tp, &ret_val); 11898 return (ret_val); 11899 } 11900 /* 11901 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11902 * it's less than ts_recent, drop it. 11903 */ 11904 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11905 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11906 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11907 return (ret_val); 11908 } 11909 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11910 &rack->r_ctl.challenge_ack_ts, 11911 &rack->r_ctl.challenge_ack_cnt)) { 11912 return (ret_val); 11913 } 11914 /* 11915 * If last ACK falls within this segment's sequence numbers, record 11916 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 11917 * from the latest proposal of the tcplw@cray.com list (Braden 11918 * 1993/04/26). 2) That updating only on newer timestamps interferes 11919 * with our earlier PAWS tests, so this check should be solely 11920 * predicated on the sequence space of this segment. 3) That we 11921 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11922 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11923 * SEG.Len, This modified check allows us to overcome RFC1323's 11924 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11925 * p.869. In such cases, we can still calculate the RTT correctly 11926 * when RCV.NXT == Last.ACK.Sent. 11927 */ 11928 if ((to->to_flags & TOF_TS) != 0 && 11929 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11930 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11931 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11932 tp->ts_recent_age = tcp_ts_getticks(); 11933 tp->ts_recent = to->to_tsval; 11934 } 11935 /* 11936 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11937 * is on (half-synchronized state), then queue data for later 11938 * processing; else drop segment and return. 11939 */ 11940 if ((thflags & TH_ACK) == 0) { 11941 if (tp->t_flags & TF_NEEDSYN) { 11942 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11943 tiwin, thflags, nxt_pkt)); 11944 11945 } else if (tp->t_flags & TF_ACKNOW) { 11946 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11947 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11948 return (ret_val); 11949 } else { 11950 ctf_do_drop(m, NULL); 11951 return (0); 11952 } 11953 } 11954 /* 11955 * Ack processing. 11956 */ 11957 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11958 return (ret_val); 11959 } 11960 if (sbavail(&so->so_snd)) { 11961 if (ctf_progress_timeout_check(tp, true)) { 11962 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11963 tp, tick, PROGRESS_DROP, __LINE__); 11964 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11965 return (1); 11966 } 11967 } 11968 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11969 tiwin, thflags, nxt_pkt)); 11970 } 11971 11972 static int 11973 rack_check_data_after_close(struct mbuf *m, 11974 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11975 { 11976 struct tcp_rack *rack; 11977 11978 rack = (struct tcp_rack *)tp->t_fb_ptr; 11979 if (rack->rc_allow_data_af_clo == 0) { 11980 close_now: 11981 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11982 /* tcp_close will kill the inp pre-log the Reset */ 11983 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11984 tp = tcp_close(tp); 11985 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11986 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11987 return (1); 11988 } 11989 if (sbavail(&so->so_snd) == 0) 11990 goto close_now; 11991 /* Ok we allow data that is ignored and a followup reset */ 11992 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11993 tp->rcv_nxt = th->th_seq + *tlen; 11994 tp->t_flags2 |= TF2_DROP_AF_DATA; 11995 rack->r_wanted_output = 1; 11996 *tlen = 0; 11997 return (0); 11998 } 11999 12000 /* 12001 * Return value of 1, the TCB is unlocked and most 12002 * likely gone, return value of 0, the TCP is still 12003 * locked. 
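 *
 * An illustrative caller pattern that honors this convention (a
 * sketch only, not the actual call site; the real dispatch goes
 * through rack->r_substate):
 *
 *	if ((*rack->r_substate)(m, th, so, tp, &to, drop_hdrlen,
 *	    tlen, tiwin, thflags, nxt_pkt, iptos)) {
 *		return;		<- TCB may already be freed
 *	}
 *	INP_WLOCK_ASSERT(tp->t_inpcb);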
12004 */ 12005 static int 12006 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 12007 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12008 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12009 { 12010 int32_t ret_val = 0; 12011 int32_t ourfinisacked = 0; 12012 struct tcp_rack *rack; 12013 12014 rack = (struct tcp_rack *)tp->t_fb_ptr; 12015 ctf_calc_rwin(so, tp); 12016 12017 if ((thflags & TH_RST) || 12018 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12019 return (__ctf_process_rst(m, th, so, tp, 12020 &rack->r_ctl.challenge_ack_ts, 12021 &rack->r_ctl.challenge_ack_cnt)); 12022 /* 12023 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12024 * synchronized state. 12025 */ 12026 if (thflags & TH_SYN) { 12027 ctf_challenge_ack(m, th, tp, &ret_val); 12028 return (ret_val); 12029 } 12030 /* 12031 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12032 * it's less than ts_recent, drop it. 12033 */ 12034 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12035 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12036 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12037 return (ret_val); 12038 } 12039 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12040 &rack->r_ctl.challenge_ack_ts, 12041 &rack->r_ctl.challenge_ack_cnt)) { 12042 return (ret_val); 12043 } 12044 /* 12045 * If new data are received on a connection after the user processes 12046 * are gone, then RST the other end. 12047 */ 12048 if ((so->so_state & SS_NOFDREF) && tlen) { 12049 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12050 return (1); 12051 } 12052 /* 12053 * If last ACK falls within this segment's sequence numbers, record 12054 * its timestamp. NOTE: 1) That the test incorporates suggestions 12055 * from the latest proposal of the tcplw@cray.com list (Braden 12056 * 1993/04/26). 2) That updating only on newer timestamps interferes 12057 * with our earlier PAWS tests, so this check should be solely 12058 * predicated on the sequence space of this segment. 3) That we 12059 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12060 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12061 * SEG.Len, This modified check allows us to overcome RFC1323's 12062 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12063 * p.869. In such cases, we can still calculate the RTT correctly 12064 * when RCV.NXT == Last.ACK.Sent. 12065 */ 12066 if ((to->to_flags & TOF_TS) != 0 && 12067 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12068 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12069 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12070 tp->ts_recent_age = tcp_ts_getticks(); 12071 tp->ts_recent = to->to_tsval; 12072 } 12073 /* 12074 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12075 * is on (half-synchronized state), then queue data for later 12076 * processing; else drop segment and return. 12077 */ 12078 if ((thflags & TH_ACK) == 0) { 12079 if (tp->t_flags & TF_NEEDSYN) { 12080 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12081 tiwin, thflags, nxt_pkt)); 12082 } else if (tp->t_flags & TF_ACKNOW) { 12083 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12084 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12085 return (ret_val); 12086 } else { 12087 ctf_do_drop(m, NULL); 12088 return (0); 12089 } 12090 } 12091 /* 12092 * Ack processing. 
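 *
 * rack_process_ack() sets ourfinisacked when the incoming ack
 * covers our FIN; that flag is what drives the FIN_WAIT_1 ->
 * FIN_WAIT_2 transition just below.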
12093 */ 12094 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12095 return (ret_val); 12096 } 12097 if (ourfinisacked) { 12098 /* 12099 * If we can't receive any more data, then closing user can 12100 * proceed. Starting the timer is contrary to the 12101 * specification, but if we don't get a FIN we'll hang 12102 * forever. 12103 * 12104 * XXXjl: we should release the tp also, and use a 12105 * compressed state. 12106 */ 12107 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12108 soisdisconnected(so); 12109 tcp_timer_activate(tp, TT_2MSL, 12110 (tcp_fast_finwait2_recycle ? 12111 tcp_finwait2_timeout : 12112 TP_MAXIDLE(tp))); 12113 } 12114 tcp_state_change(tp, TCPS_FIN_WAIT_2); 12115 } 12116 if (sbavail(&so->so_snd)) { 12117 if (ctf_progress_timeout_check(tp, true)) { 12118 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12119 tp, tick, PROGRESS_DROP, __LINE__); 12120 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12121 return (1); 12122 } 12123 } 12124 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12125 tiwin, thflags, nxt_pkt)); 12126 } 12127 12128 /* 12129 * Return value of 1, the TCB is unlocked and most 12130 * likely gone, return value of 0, the TCP is still 12131 * locked. 12132 */ 12133 static int 12134 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 12135 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12136 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12137 { 12138 int32_t ret_val = 0; 12139 int32_t ourfinisacked = 0; 12140 struct tcp_rack *rack; 12141 12142 rack = (struct tcp_rack *)tp->t_fb_ptr; 12143 ctf_calc_rwin(so, tp); 12144 12145 if ((thflags & TH_RST) || 12146 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12147 return (__ctf_process_rst(m, th, so, tp, 12148 &rack->r_ctl.challenge_ack_ts, 12149 &rack->r_ctl.challenge_ack_cnt)); 12150 /* 12151 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12152 * synchronized state. 12153 */ 12154 if (thflags & TH_SYN) { 12155 ctf_challenge_ack(m, th, tp, &ret_val); 12156 return (ret_val); 12157 } 12158 /* 12159 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12160 * it's less than ts_recent, drop it. 12161 */ 12162 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12163 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12164 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12165 return (ret_val); 12166 } 12167 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12168 &rack->r_ctl.challenge_ack_ts, 12169 &rack->r_ctl.challenge_ack_cnt)) { 12170 return (ret_val); 12171 } 12172 /* 12173 * If new data are received on a connection after the user processes 12174 * are gone, then RST the other end. 12175 */ 12176 if ((so->so_state & SS_NOFDREF) && tlen) { 12177 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12178 return (1); 12179 } 12180 /* 12181 * If last ACK falls within this segment's sequence numbers, record 12182 * its timestamp. NOTE: 1) That the test incorporates suggestions 12183 * from the latest proposal of the tcplw@cray.com list (Braden 12184 * 1993/04/26). 2) That updating only on newer timestamps interferes 12185 * with our earlier PAWS tests, so this check should be solely 12186 * predicated on the sequence space of this segment. 
3) That we 12187 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12188 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12189 * SEG.Len, This modified check allows us to overcome RFC1323's 12190 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12191 * p.869. In such cases, we can still calculate the RTT correctly 12192 * when RCV.NXT == Last.ACK.Sent. 12193 */ 12194 if ((to->to_flags & TOF_TS) != 0 && 12195 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12196 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12197 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12198 tp->ts_recent_age = tcp_ts_getticks(); 12199 tp->ts_recent = to->to_tsval; 12200 } 12201 /* 12202 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12203 * is on (half-synchronized state), then queue data for later 12204 * processing; else drop segment and return. 12205 */ 12206 if ((thflags & TH_ACK) == 0) { 12207 if (tp->t_flags & TF_NEEDSYN) { 12208 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12209 tiwin, thflags, nxt_pkt)); 12210 } else if (tp->t_flags & TF_ACKNOW) { 12211 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12212 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12213 return (ret_val); 12214 } else { 12215 ctf_do_drop(m, NULL); 12216 return (0); 12217 } 12218 } 12219 /* 12220 * Ack processing. 12221 */ 12222 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12223 return (ret_val); 12224 } 12225 if (ourfinisacked) { 12226 tcp_twstart(tp); 12227 m_freem(m); 12228 return (1); 12229 } 12230 if (sbavail(&so->so_snd)) { 12231 if (ctf_progress_timeout_check(tp, true)) { 12232 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12233 tp, tick, PROGRESS_DROP, __LINE__); 12234 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12235 return (1); 12236 } 12237 } 12238 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12239 tiwin, thflags, nxt_pkt)); 12240 } 12241 12242 /* 12243 * Return value of 1, the TCB is unlocked and most 12244 * likely gone, return value of 0, the TCP is still 12245 * locked. 12246 */ 12247 static int 12248 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12249 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12250 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12251 { 12252 int32_t ret_val = 0; 12253 int32_t ourfinisacked = 0; 12254 struct tcp_rack *rack; 12255 12256 rack = (struct tcp_rack *)tp->t_fb_ptr; 12257 ctf_calc_rwin(so, tp); 12258 12259 if ((thflags & TH_RST) || 12260 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12261 return (__ctf_process_rst(m, th, so, tp, 12262 &rack->r_ctl.challenge_ack_ts, 12263 &rack->r_ctl.challenge_ack_cnt)); 12264 /* 12265 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12266 * synchronized state. 12267 */ 12268 if (thflags & TH_SYN) { 12269 ctf_challenge_ack(m, th, tp, &ret_val); 12270 return (ret_val); 12271 } 12272 /* 12273 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12274 * it's less than ts_recent, drop it. 
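 *
 * For example (numbers assumed for illustration): with
 * ts_recent == 5000, an arriving tsval of 4990 is presumed to come
 * from an old duplicate.  ctf_ts_check() is expected to apply the
 * usual PAWS-idle rule from the base stack: drop the segment (and
 * ack it) unless ts_recent is older than the PAWS idle limit
 * (roughly 24 days), in which case ts_recent is invalidated and
 * the segment is allowed through.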
12275 */ 12276 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12277 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12278 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12279 return (ret_val); 12280 } 12281 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12282 &rack->r_ctl.challenge_ack_ts, 12283 &rack->r_ctl.challenge_ack_cnt)) { 12284 return (ret_val); 12285 } 12286 /* 12287 * If new data are received on a connection after the user processes 12288 * are gone, then RST the other end. 12289 */ 12290 if ((so->so_state & SS_NOFDREF) && tlen) { 12291 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12292 return (1); 12293 } 12294 /* 12295 * If last ACK falls within this segment's sequence numbers, record 12296 * its timestamp. NOTE: 1) That the test incorporates suggestions 12297 * from the latest proposal of the tcplw@cray.com list (Braden 12298 * 1993/04/26). 2) That updating only on newer timestamps interferes 12299 * with our earlier PAWS tests, so this check should be solely 12300 * predicated on the sequence space of this segment. 3) That we 12301 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12302 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12303 * SEG.Len, This modified check allows us to overcome RFC1323's 12304 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12305 * p.869. In such cases, we can still calculate the RTT correctly 12306 * when RCV.NXT == Last.ACK.Sent. 12307 */ 12308 if ((to->to_flags & TOF_TS) != 0 && 12309 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12310 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12311 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12312 tp->ts_recent_age = tcp_ts_getticks(); 12313 tp->ts_recent = to->to_tsval; 12314 } 12315 /* 12316 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12317 * is on (half-synchronized state), then queue data for later 12318 * processing; else drop segment and return. 12319 */ 12320 if ((thflags & TH_ACK) == 0) { 12321 if (tp->t_flags & TF_NEEDSYN) { 12322 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12323 tiwin, thflags, nxt_pkt)); 12324 } else if (tp->t_flags & TF_ACKNOW) { 12325 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12326 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12327 return (ret_val); 12328 } else { 12329 ctf_do_drop(m, NULL); 12330 return (0); 12331 } 12332 } 12333 /* 12334 * case TCPS_LAST_ACK: Ack processing. 12335 */ 12336 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12337 return (ret_val); 12338 } 12339 if (ourfinisacked) { 12340 tp = tcp_close(tp); 12341 ctf_do_drop(m, tp); 12342 return (1); 12343 } 12344 if (sbavail(&so->so_snd)) { 12345 if (ctf_progress_timeout_check(tp, true)) { 12346 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12347 tp, tick, PROGRESS_DROP, __LINE__); 12348 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12349 return (1); 12350 } 12351 } 12352 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12353 tiwin, thflags, nxt_pkt)); 12354 } 12355 12356 /* 12357 * Return value of 1, the TCB is unlocked and most 12358 * likely gone, return value of 0, the TCP is still 12359 * locked. 
12360 */ 12361 static int 12362 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12363 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12364 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12365 { 12366 int32_t ret_val = 0; 12367 int32_t ourfinisacked = 0; 12368 struct tcp_rack *rack; 12369 12370 rack = (struct tcp_rack *)tp->t_fb_ptr; 12371 ctf_calc_rwin(so, tp); 12372 12373 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12374 if ((thflags & TH_RST) || 12375 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12376 return (__ctf_process_rst(m, th, so, tp, 12377 &rack->r_ctl.challenge_ack_ts, 12378 &rack->r_ctl.challenge_ack_cnt)); 12379 /* 12380 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12381 * synchronized state. 12382 */ 12383 if (thflags & TH_SYN) { 12384 ctf_challenge_ack(m, th, tp, &ret_val); 12385 return (ret_val); 12386 } 12387 /* 12388 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12389 * it's less than ts_recent, drop it. 12390 */ 12391 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12392 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12393 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12394 return (ret_val); 12395 } 12396 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12397 &rack->r_ctl.challenge_ack_ts, 12398 &rack->r_ctl.challenge_ack_cnt)) { 12399 return (ret_val); 12400 } 12401 /* 12402 * If new data are received on a connection after the user processes 12403 * are gone, then RST the other end. 12404 */ 12405 if ((so->so_state & SS_NOFDREF) && 12406 tlen) { 12407 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12408 return (1); 12409 } 12410 /* 12411 * If last ACK falls within this segment's sequence numbers, record 12412 * its timestamp. NOTE: 1) That the test incorporates suggestions 12413 * from the latest proposal of the tcplw@cray.com list (Braden 12414 * 1993/04/26). 2) That updating only on newer timestamps interferes 12415 * with our earlier PAWS tests, so this check should be solely 12416 * predicated on the sequence space of this segment. 3) That we 12417 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12418 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12419 * SEG.Len, This modified check allows us to overcome RFC1323's 12420 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12421 * p.869. In such cases, we can still calculate the RTT correctly 12422 * when RCV.NXT == Last.ACK.Sent. 12423 */ 12424 if ((to->to_flags & TOF_TS) != 0 && 12425 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12426 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12427 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12428 tp->ts_recent_age = tcp_ts_getticks(); 12429 tp->ts_recent = to->to_tsval; 12430 } 12431 /* 12432 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12433 * is on (half-synchronized state), then queue data for later 12434 * processing; else drop segment and return. 12435 */ 12436 if ((thflags & TH_ACK) == 0) { 12437 if (tp->t_flags & TF_NEEDSYN) { 12438 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12439 tiwin, thflags, nxt_pkt)); 12440 } else if (tp->t_flags & TF_ACKNOW) { 12441 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12442 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12443 return (ret_val); 12444 } else { 12445 ctf_do_drop(m, NULL); 12446 return (0); 12447 } 12448 } 12449 /* 12450 * Ack processing. 
12451 */ 12452 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12453 return (ret_val); 12454 } 12455 if (sbavail(&so->so_snd)) { 12456 if (ctf_progress_timeout_check(tp, true)) { 12457 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12458 tp, tick, PROGRESS_DROP, __LINE__); 12459 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12460 return (1); 12461 } 12462 } 12463 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12464 tiwin, thflags, nxt_pkt)); 12465 } 12466 12467 static void inline 12468 rack_clear_rate_sample(struct tcp_rack *rack) 12469 { 12470 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12471 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12472 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12473 } 12474 12475 static void 12476 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12477 { 12478 uint64_t bw_est, rate_wanted; 12479 int chged = 0; 12480 uint32_t user_max, orig_min, orig_max; 12481 12482 orig_min = rack->r_ctl.rc_pace_min_segs; 12483 orig_max = rack->r_ctl.rc_pace_max_segs; 12484 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12485 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12486 chged = 1; 12487 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12488 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12489 if (user_max != rack->r_ctl.rc_pace_max_segs) 12490 chged = 1; 12491 } 12492 if (rack->rc_force_max_seg) { 12493 rack->r_ctl.rc_pace_max_segs = user_max; 12494 } else if (rack->use_fixed_rate) { 12495 bw_est = rack_get_bw(rack); 12496 if ((rack->r_ctl.crte == NULL) || 12497 (bw_est != rack->r_ctl.crte->rate)) { 12498 rack->r_ctl.rc_pace_max_segs = user_max; 12499 } else { 12500 /* We are pacing right at the hardware rate */ 12501 uint32_t segsiz; 12502 12503 segsiz = min(ctf_fixed_maxseg(tp), 12504 rack->r_ctl.rc_pace_min_segs); 12505 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12506 tp, bw_est, segsiz, 0, 12507 rack->r_ctl.crte, NULL); 12508 } 12509 } else if (rack->rc_always_pace) { 12510 if (rack->r_ctl.gp_bw || 12511 #ifdef NETFLIX_PEAKRATE 12512 rack->rc_tp->t_maxpeakrate || 12513 #endif 12514 rack->r_ctl.init_rate) { 12515 /* We have a rate of some sort set */ 12516 uint32_t orig; 12517 12518 bw_est = rack_get_bw(rack); 12519 orig = rack->r_ctl.rc_pace_max_segs; 12520 if (fill_override) 12521 rate_wanted = *fill_override; 12522 else 12523 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12524 if (rate_wanted) { 12525 /* We have something */ 12526 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12527 rate_wanted, 12528 ctf_fixed_maxseg(rack->rc_tp)); 12529 } else 12530 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12531 if (orig != rack->r_ctl.rc_pace_max_segs) 12532 chged = 1; 12533 } else if ((rack->r_ctl.gp_bw == 0) && 12534 (rack->r_ctl.rc_pace_max_segs == 0)) { 12535 /* 12536 * If we have nothing limit us to bursting 12537 * out IW sized pieces. 
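 *
 * For example (values assumed for illustration): with a 1448 byte
 * maxseg and the common 10 segment initial window,
 * rc_init_window() works out to roughly 14480 bytes, so until a
 * goodput estimate or a configured rate exists each pacing burst
 * is capped at about ten segments.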
12538 */ 12539 chged = 1; 12540 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12541 } 12542 } 12543 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12544 chged = 1; 12545 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12546 } 12547 if (chged) 12548 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12549 } 12550 12551 12552 static void 12553 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12554 { 12555 #ifdef INET6 12556 struct ip6_hdr *ip6 = NULL; 12557 #endif 12558 #ifdef INET 12559 struct ip *ip = NULL; 12560 #endif 12561 struct udphdr *udp = NULL; 12562 12563 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12564 #ifdef INET6 12565 if (rack->r_is_v6) { 12566 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12567 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12568 if (tp->t_port) { 12569 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12570 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12571 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12572 udp->uh_dport = tp->t_port; 12573 rack->r_ctl.fsb.udp = udp; 12574 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12575 } else 12576 { 12577 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12578 rack->r_ctl.fsb.udp = NULL; 12579 } 12580 tcpip_fillheaders(rack->rc_inp, 12581 tp->t_port, 12582 ip6, rack->r_ctl.fsb.th); 12583 } else 12584 #endif /* INET6 */ 12585 { 12586 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12587 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12588 if (tp->t_port) { 12589 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12590 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12591 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12592 udp->uh_dport = tp->t_port; 12593 rack->r_ctl.fsb.udp = udp; 12594 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12595 } else 12596 { 12597 rack->r_ctl.fsb.udp = NULL; 12598 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12599 } 12600 tcpip_fillheaders(rack->rc_inp, 12601 tp->t_port, 12602 ip, rack->r_ctl.fsb.th); 12603 } 12604 rack->r_fsb_inited = 1; 12605 } 12606 12607 static int 12608 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12609 { 12610 /* 12611 * Allocate the larger of spaces V6 if available else just 12612 * V4 and include udphdr (overbook) 12613 */ 12614 #ifdef INET6 12615 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12616 #else 12617 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12618 #endif 12619 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12620 M_TCPFSB, M_NOWAIT|M_ZERO); 12621 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12622 return (ENOMEM); 12623 } 12624 rack->r_fsb_inited = 0; 12625 return (0); 12626 } 12627 12628 static int 12629 rack_init(struct tcpcb *tp) 12630 { 12631 struct tcp_rack *rack = NULL; 12632 #ifdef INVARIANTS 12633 struct rack_sendmap *insret; 12634 #endif 12635 uint32_t iwin, snt, us_cts; 12636 int err; 12637 12638 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12639 if (tp->t_fb_ptr == NULL) { 12640 /* 12641 * We need to allocate memory but cant. The INP and INP_INFO 12642 * locks and they are recusive (happens during setup. 
So a 12643 * scheme to drop the locks fails :( 12644 * 12645 */ 12646 return (ENOMEM); 12647 } 12648 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12649 12650 rack = (struct tcp_rack *)tp->t_fb_ptr; 12651 RB_INIT(&rack->r_ctl.rc_mtree); 12652 TAILQ_INIT(&rack->r_ctl.rc_free); 12653 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12654 rack->rc_tp = tp; 12655 rack->rc_inp = tp->t_inpcb; 12656 /* Set the flag */ 12657 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12658 /* Probably not needed but lets be sure */ 12659 rack_clear_rate_sample(rack); 12660 /* 12661 * Save off the default values, socket options will poke 12662 * at these if pacing is not on or we have not yet 12663 * reached where pacing is on (gp_ready/fixed enabled). 12664 * When they get set into the CC module (when gp_ready 12665 * is enabled or we enable fixed) then we will set these 12666 * values into the CC and place in here the old values 12667 * so we have a restoral. Then we will set the flag 12668 * rc_pacing_cc_set. That way whenever we turn off pacing 12669 * or switch off this stack, we will know to go restore 12670 * the saved values. 12671 */ 12672 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12673 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12674 /* We want abe like behavior as well */ 12675 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12676 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12677 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12678 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12679 rack->r_ctl.roundends = tp->snd_max; 12680 if (use_rack_rr) 12681 rack->use_rack_rr = 1; 12682 if (V_tcp_delack_enabled) 12683 tp->t_delayed_ack = 1; 12684 else 12685 tp->t_delayed_ack = 0; 12686 #ifdef TCP_ACCOUNTING 12687 if (rack_tcp_accounting) { 12688 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12689 } 12690 #endif 12691 if (rack_enable_shared_cwnd) 12692 rack->rack_enable_scwnd = 1; 12693 rack->rc_user_set_max_segs = rack_hptsi_segments; 12694 rack->rc_force_max_seg = 0; 12695 if (rack_use_imac_dack) 12696 rack->rc_dack_mode = 1; 12697 TAILQ_INIT(&rack->r_ctl.opt_list); 12698 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12699 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12700 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12701 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12702 rack->r_ctl.rc_highest_us_rtt = 0; 12703 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12704 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12705 if (rack_use_cmp_acks) 12706 rack->r_use_cmp_ack = 1; 12707 if (rack_disable_prr) 12708 rack->rack_no_prr = 1; 12709 if (rack_gp_no_rec_chg) 12710 rack->rc_gp_no_rec_chg = 1; 12711 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12712 rack->rc_always_pace = 1; 12713 if (rack->use_fixed_rate || rack->gp_ready) 12714 rack_set_cc_pacing(rack); 12715 } else 12716 rack->rc_always_pace = 0; 12717 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12718 rack->r_mbuf_queue = 1; 12719 else 12720 rack->r_mbuf_queue = 0; 12721 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12722 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12723 else 12724 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12725 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12726 if (rack_limits_scwnd) 12727 rack->r_limit_scw = 1; 12728 else 12729 rack->r_limit_scw = 0; 12730 rack->rc_labc = V_tcp_abc_l_var; 12731 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12732 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12733 
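	/*
	 * A note on units (illustrative, assuming hz = 1000): rack keeps
	 * its timers in microseconds while the base stack's tunables are
	 * in ticks.  That is why tcp_rexmit_slop was run through
	 * TICKS_2_USEC() above (e.g. a 200 tick slop becomes 200000 usec)
	 * and why t_rttlow is converted with TICKS_2_USEC() further down,
	 * once rack_convert_rtts() has dealt with t_srtt and t_rttvar.
	 */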
rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12734 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12735 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12736 rack->r_ctl.rc_min_to = rack_min_to; 12737 microuptime(&rack->r_ctl.act_rcv_time); 12738 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12739 rack->r_running_late = 0; 12740 rack->r_running_early = 0; 12741 rack->rc_init_win = rack_default_init_window; 12742 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12743 if (rack_hw_up_only) 12744 rack->r_up_only = 1; 12745 if (rack_do_dyn_mul) { 12746 /* When dynamic adjustment is on CA needs to start at 100% */ 12747 rack->rc_gp_dyn_mul = 1; 12748 if (rack_do_dyn_mul >= 100) 12749 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12750 } else 12751 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12752 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12753 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12754 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12755 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12756 rack_probertt_filter_life); 12757 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12758 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12759 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12760 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12761 rack->r_ctl.rc_time_probertt_starts = 0; 12762 if (rack_dsack_std_based & 0x1) { 12763 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12764 rack->rc_rack_tmr_std_based = 1; 12765 } 12766 if (rack_dsack_std_based & 0x2) { 12767 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12768 rack->rc_rack_use_dsack = 1; 12769 } 12770 /* We require at least one measurement, even if the sysctl is 0 */ 12771 if (rack_req_measurements) 12772 rack->r_ctl.req_measurements = rack_req_measurements; 12773 else 12774 rack->r_ctl.req_measurements = 1; 12775 if (rack_enable_hw_pacing) 12776 rack->rack_hdw_pace_ena = 1; 12777 if (rack_hw_rate_caps) 12778 rack->r_rack_hw_rate_caps = 1; 12779 /* Do we force on detection? 
*/ 12780 #ifdef NETFLIX_EXP_DETECTION 12781 if (tcp_force_detection) 12782 rack->do_detection = 1; 12783 else 12784 #endif 12785 rack->do_detection = 0; 12786 if (rack_non_rxt_use_cr) 12787 rack->rack_rec_nonrxt_use_cr = 1; 12788 err = rack_init_fsb(tp, rack); 12789 if (err) { 12790 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12791 tp->t_fb_ptr = NULL; 12792 return (err); 12793 } 12794 if (tp->snd_una != tp->snd_max) { 12795 /* Create a send map for the current outstanding data */ 12796 struct rack_sendmap *rsm; 12797 12798 rsm = rack_alloc(rack); 12799 if (rsm == NULL) { 12800 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12801 tp->t_fb_ptr = NULL; 12802 return (ENOMEM); 12803 } 12804 rsm->r_no_rtt_allowed = 1; 12805 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12806 rsm->r_rtr_cnt = 1; 12807 rsm->r_rtr_bytes = 0; 12808 if (tp->t_flags & TF_SENTFIN) { 12809 rsm->r_end = tp->snd_max - 1; 12810 rsm->r_flags |= RACK_HAS_FIN; 12811 } else { 12812 rsm->r_end = tp->snd_max; 12813 } 12814 if (tp->snd_una == tp->iss) { 12815 /* The data space is one beyond snd_una */ 12816 rsm->r_flags |= RACK_HAS_SYN; 12817 rsm->r_start = tp->iss; 12818 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 12819 } else 12820 rsm->r_start = tp->snd_una; 12821 rsm->r_dupack = 0; 12822 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12823 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12824 if (rsm->m) 12825 rsm->orig_m_len = rsm->m->m_len; 12826 else 12827 rsm->orig_m_len = 0; 12828 } else { 12829 /* 12830 * This can happen if we have a stand-alone FIN or 12831 * SYN. 12832 */ 12833 rsm->m = NULL; 12834 rsm->orig_m_len = 0; 12835 rsm->soff = 0; 12836 } 12837 #ifndef INVARIANTS 12838 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12839 #else 12840 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12841 if (insret != NULL) { 12842 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12843 insret, rack, rsm); 12844 } 12845 #endif 12846 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12847 rsm->r_in_tmap = 1; 12848 } 12849 /* 12850 * Timers in Rack are kept in microseconds so lets 12851 * convert any initial incoming variables 12852 * from ticks into usecs. Note that we 12853 * also change the values of t_srtt and t_rttvar, if 12854 * they are non-zero. They are kept with a 5 12855 * bit decimal so we have to carefully convert 12856 * these to get the full precision. 12857 */ 12858 rack_convert_rtts(tp); 12859 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12860 if (rack_do_hystart) { 12861 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 12862 if (rack_do_hystart > 1) 12863 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 12864 if (rack_do_hystart > 2) 12865 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 12866 } 12867 if (rack_def_profile) 12868 rack_set_profile(rack, rack_def_profile); 12869 /* Cancel the GP measurement in progress */ 12870 tp->t_flags &= ~TF_GPUTINPROG; 12871 if (SEQ_GT(tp->snd_max, tp->iss)) 12872 snt = tp->snd_max - tp->iss; 12873 else 12874 snt = 0; 12875 iwin = rc_init_window(rack); 12876 if (snt < iwin) { 12877 /* We are not past the initial window 12878 * so we need to make sure cwnd is 12879 * correct. 12880 */ 12881 if (tp->snd_cwnd < iwin) 12882 tp->snd_cwnd = iwin; 12883 /* 12884 * If we are within the initial window 12885 * we want ssthresh to be unlimited. 
Setting 12886 * it to the rwnd (which the default stack does 12887 * and older racks) is not really a good idea 12888 * since we want to be in SS and grow both the 12889 * cwnd and the rwnd (via dynamic rwnd growth). If 12890 * we set it to the rwnd then as the peer grows its 12891 * rwnd we will be stuck in CA and never hit SS. 12892 * 12893 * It's far better to raise it up high (this takes the 12894 * risk that there has been a loss already, probably 12895 * we should have an indicator in all stacks of loss 12896 * but we don't), but considering the normal use this 12897 * is a risk worth taking. The consequences of not 12898 * hitting SS are far worse than going one more time 12899 * into it early on (before we have sent even an IW). 12900 * It is highly unlikely that we will have had a loss 12901 * before getting the IW out. 12902 */ 12903 tp->snd_ssthresh = 0xffffffff; 12904 } 12905 rack_stop_all_timers(tp); 12906 /* Let's set up the fsb block */ 12907 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12908 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12909 __LINE__, RACK_RTTS_INIT); 12910 return (0); 12911 } 12912 12913 static int 12914 rack_handoff_ok(struct tcpcb *tp) 12915 { 12916 if ((tp->t_state == TCPS_CLOSED) || 12917 (tp->t_state == TCPS_LISTEN)) { 12918 /* Sure no problem though it may not stick */ 12919 return (0); 12920 } 12921 if ((tp->t_state == TCPS_SYN_SENT) || 12922 (tp->t_state == TCPS_SYN_RECEIVED)) { 12923 /* 12924 * We really don't know if you support SACK, 12925 * you have to get to ESTAB or beyond to tell. 12926 */ 12927 return (EAGAIN); 12928 } 12929 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12930 /* 12931 * Rack will only send a FIN after all data is acknowledged. 12932 * So in this case we have more data outstanding. We can't 12933 * switch stacks until either all data and only the FIN 12934 * is left (in which case rack_init() now knows how 12935 * to deal with that) <or> all is acknowledged and we 12936 * are only left with incoming data, though why you 12937 * would want to switch to rack after all data is acknowledged 12938 * I have no idea (rrs)! 12939 */ 12940 return (EAGAIN); 12941 } 12942 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required) { 12943 return (0); 12944 } 12945 /* 12946 * If we reach here we don't do SACK on this connection so we can 12947 * never do rack. 12948 */ 12949 return (EINVAL); 12950 } 12951 12952 12953 static void 12954 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12955 { 12956 int ack_cmp = 0; 12957 12958 if (tp->t_fb_ptr) { 12959 struct tcp_rack *rack; 12960 struct rack_sendmap *rsm, *nrsm; 12961 #ifdef INVARIANTS 12962 struct rack_sendmap *rm; 12963 #endif 12964 12965 rack = (struct tcp_rack *)tp->t_fb_ptr; 12966 if (tp->t_in_pkt) { 12967 /* 12968 * It is unsafe to process the packets since a 12969 * reset may be lurking in them (it's rare but it 12970 * can occur). If we were to find a RST, then we 12971 * would end up dropping the connection and the 12972 * INP lock, so when we return the caller (tcp_usrreq) 12973 * will blow up when it tries to unlock the inp.
12974 */ 12975 struct mbuf *save, *m; 12976 12977 m = tp->t_in_pkt; 12978 tp->t_in_pkt = NULL; 12979 tp->t_tail_pkt = NULL; 12980 while (m) { 12981 save = m->m_nextpkt; 12982 m->m_nextpkt = NULL; 12983 m_freem(m); 12984 m = save; 12985 } 12986 if ((tp->t_inpcb) && 12987 (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP)) 12988 ack_cmp = 1; 12989 if (ack_cmp) { 12990 /* Tally whether we used large or small mbufs (if ack-cmp was used). */ 12991 if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS) 12992 counter_u64_add(rack_large_ackcmp, 1); 12993 else 12994 counter_u64_add(rack_small_ackcmp, 1); 12995 } 12996 } 12997 tp->t_flags &= ~TF_FORCEDATA; 12998 #ifdef NETFLIX_SHARED_CWND 12999 if (rack->r_ctl.rc_scw) { 13000 uint32_t limit; 13001 13002 if (rack->r_limit_scw) 13003 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 13004 else 13005 limit = 0; 13006 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 13007 rack->r_ctl.rc_scw_index, 13008 limit); 13009 rack->r_ctl.rc_scw = NULL; 13010 } 13011 #endif 13012 if (rack->r_ctl.fsb.tcp_ip_hdr) { 13013 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 13014 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 13015 rack->r_ctl.fsb.th = NULL; 13016 } 13017 /* Convert back to ticks, with the 5 bit fractional remainder. */ 13018 if (tp->t_srtt > 1) { 13019 uint32_t val, frac; 13020 13021 val = USEC_2_TICKS(tp->t_srtt); 13022 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 13023 tp->t_srtt = val << TCP_RTT_SHIFT; 13024 /* 13025 * frac is the fractional part that is left 13026 * over from converting to hz and shifting. 13027 * We need to convert this to the 5 bit 13028 * remainder. 13029 */ 13030 if (frac) { 13031 if (hz == 1000) { 13032 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 13033 } else { 13034 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 13035 } 13036 tp->t_srtt += frac; 13037 } 13038 } 13039 if (tp->t_rttvar) { 13040 uint32_t val, frac; 13041 13042 val = USEC_2_TICKS(tp->t_rttvar); 13043 frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz); 13044 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 13045 /* 13046 * frac is the fractional part that is left 13047 * over from converting to hz and shifting. 13048 * We need to convert this to the 5 bit 13049 * remainder.
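 *
 * Worked example (assuming hz = 1000), using the srtt case above:
 * an original t_srtt of 30500 usec gives val = 30 ticks and
 * frac = 500 usec; 500 * TCP_RTT_SCALE / 1000 = 16, so the stored
 * value becomes (30 << TCP_RTT_SHIFT) + 16 = 976, i.e. 30.5 ticks
 * in the 5 bit fixed point format the base stack expects.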
13050 */ 13051 if (frac) { 13052 if (hz == 1000) { 13053 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 13054 } else { 13055 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 13056 } 13057 tp->t_rttvar += frac; 13058 } 13059 } 13060 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 13061 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 13062 if (rack->rc_always_pace) { 13063 tcp_decrement_paced_conn(); 13064 rack_undo_cc_pacing(rack); 13065 rack->rc_always_pace = 0; 13066 } 13067 /* Clean up any options if they were not applied */ 13068 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 13069 struct deferred_opt_list *dol; 13070 13071 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 13072 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 13073 free(dol, M_TCPDO); 13074 } 13075 /* rack does not use force data but other stacks may clear it */ 13076 if (rack->r_ctl.crte != NULL) { 13077 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 13078 rack->rack_hdrw_pacing = 0; 13079 rack->r_ctl.crte = NULL; 13080 } 13081 #ifdef TCP_BLACKBOX 13082 tcp_log_flowend(tp); 13083 #endif 13084 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 13085 #ifndef INVARIANTS 13086 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 13087 #else 13088 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 13089 if (rm != rsm) { 13090 panic("At fini, rack:%p rsm:%p rm:%p", 13091 rack, rsm, rm); 13092 } 13093 #endif 13094 uma_zfree(rack_zone, rsm); 13095 } 13096 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 13097 while (rsm) { 13098 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 13099 uma_zfree(rack_zone, rsm); 13100 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 13101 } 13102 rack->rc_free_cnt = 0; 13103 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 13104 tp->t_fb_ptr = NULL; 13105 } 13106 if (tp->t_inpcb) { 13107 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 13108 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 13109 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 13110 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 13111 /* Cancel the GP measurement in progress */ 13112 tp->t_flags &= ~TF_GPUTINPROG; 13113 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 13114 } 13115 /* Make sure snd_nxt is correctly set */ 13116 tp->snd_nxt = tp->snd_max; 13117 } 13118 13119 static void 13120 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 13121 { 13122 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 13123 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 13124 } 13125 switch (tp->t_state) { 13126 case TCPS_SYN_SENT: 13127 rack->r_state = TCPS_SYN_SENT; 13128 rack->r_substate = rack_do_syn_sent; 13129 break; 13130 case TCPS_SYN_RECEIVED: 13131 rack->r_state = TCPS_SYN_RECEIVED; 13132 rack->r_substate = rack_do_syn_recv; 13133 break; 13134 case TCPS_ESTABLISHED: 13135 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13136 rack->r_state = TCPS_ESTABLISHED; 13137 rack->r_substate = rack_do_established; 13138 break; 13139 case TCPS_CLOSE_WAIT: 13140 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13141 rack->r_state = TCPS_CLOSE_WAIT; 13142 rack->r_substate = rack_do_close_wait; 13143 break; 13144 case TCPS_FIN_WAIT_1: 13145 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13146 rack->r_state = TCPS_FIN_WAIT_1; 13147 rack->r_substate = rack_do_fin_wait_1; 13148 break; 13149 case TCPS_CLOSING: 13150 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13151 rack->r_state = TCPS_CLOSING; 13152 rack->r_substate = 
rack_do_closing; 13153 break; 13154 case TCPS_LAST_ACK: 13155 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13156 rack->r_state = TCPS_LAST_ACK; 13157 rack->r_substate = rack_do_lastack; 13158 break; 13159 case TCPS_FIN_WAIT_2: 13160 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13161 rack->r_state = TCPS_FIN_WAIT_2; 13162 rack->r_substate = rack_do_fin_wait_2; 13163 break; 13164 case TCPS_LISTEN: 13165 case TCPS_CLOSED: 13166 case TCPS_TIME_WAIT: 13167 default: 13168 break; 13169 }; 13170 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 13171 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 13172 13173 } 13174 13175 static void 13176 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 13177 { 13178 /* 13179 * We received an ack, and then did not 13180 * call send or were bounced out due to the 13181 * hpts was running. Now a timer is up as well, is 13182 * it the right timer? 13183 */ 13184 struct rack_sendmap *rsm; 13185 int tmr_up; 13186 13187 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 13188 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 13189 return; 13190 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 13191 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 13192 (tmr_up == PACE_TMR_RXT)) { 13193 /* Should be an RXT */ 13194 return; 13195 } 13196 if (rsm == NULL) { 13197 /* Nothing outstanding? */ 13198 if (tp->t_flags & TF_DELACK) { 13199 if (tmr_up == PACE_TMR_DELACK) 13200 /* We are supposed to have delayed ack up and we do */ 13201 return; 13202 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 13203 /* 13204 * if we hit enobufs then we would expect the possiblity 13205 * of nothing outstanding and the RXT up (and the hptsi timer). 13206 */ 13207 return; 13208 } else if (((V_tcp_always_keepalive || 13209 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 13210 (tp->t_state <= TCPS_CLOSING)) && 13211 (tmr_up == PACE_TMR_KEEP) && 13212 (tp->snd_max == tp->snd_una)) { 13213 /* We should have keep alive up and we do */ 13214 return; 13215 } 13216 } 13217 if (SEQ_GT(tp->snd_max, tp->snd_una) && 13218 ((tmr_up == PACE_TMR_TLP) || 13219 (tmr_up == PACE_TMR_RACK) || 13220 (tmr_up == PACE_TMR_RXT))) { 13221 /* 13222 * Either a Rack, TLP or RXT is fine if we 13223 * have outstanding data. 13224 */ 13225 return; 13226 } else if (tmr_up == PACE_TMR_DELACK) { 13227 /* 13228 * If the delayed ack was going to go off 13229 * before the rtx/tlp/rack timer were going to 13230 * expire, then that would be the timer in control. 13231 * Note we don't check the time here trusting the 13232 * code is correct. 13233 */ 13234 return; 13235 } 13236 /* 13237 * Ok the timer originally started is not what we want now. 13238 * We will force the hpts to be stopped if any, and restart 13239 * with the slot set to what was in the saved slot. 
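 *
 * In summary, the checks above accept the running timer when it is
 * (informal recap added for clarity):
 *
 *	PERSIST  and we are in persist mode
 *	RXT      and nothing is outstanding (or not yet established),
 *	         or the scoreboard is empty but the socket buffer is not
 *	DELACK   and a delayed ack is pending (or nothing else would
 *	         fire sooner)
 *	KEEP     and keepalives apply with nothing outstanding
 *	RACK/TLP/RXT and data is outstanding
 *
 * anything else falls through to the cancel-and-restart below.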
13240 */ 13241 if (tcp_in_hpts(rack->rc_inp)) { 13242 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 13243 uint32_t us_cts; 13244 13245 us_cts = tcp_get_usecs(NULL); 13246 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 13247 rack->r_early = 1; 13248 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 13249 } 13250 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13251 } 13252 tcp_hpts_remove(tp->t_inpcb); 13253 } 13254 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13255 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13256 } 13257 13258 13259 static void 13260 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13261 { 13262 if ((SEQ_LT(tp->snd_wl1, seq) || 13263 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13264 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13265 /* keep track of pure window updates */ 13266 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13267 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13268 tp->snd_wnd = tiwin; 13269 rack_validate_fo_sendwin_up(tp, rack); 13270 tp->snd_wl1 = seq; 13271 tp->snd_wl2 = ack; 13272 if (tp->snd_wnd > tp->max_sndwnd) 13273 tp->max_sndwnd = tp->snd_wnd; 13274 rack->r_wanted_output = 1; 13275 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13276 tp->snd_wnd = tiwin; 13277 rack_validate_fo_sendwin_up(tp, rack); 13278 tp->snd_wl1 = seq; 13279 tp->snd_wl2 = ack; 13280 } else { 13281 /* Not a valid win update */ 13282 return; 13283 } 13284 if (tp->snd_wnd > tp->max_sndwnd) 13285 tp->max_sndwnd = tp->snd_wnd; 13286 if (tp->snd_wnd < (tp->snd_max - high_seq)) { 13287 /* The peer collapsed the window */ 13288 rack_collapsed_window(rack); 13289 } else if (rack->rc_has_collapsed) 13290 rack_un_collapse_window(rack); 13291 /* Do we exit persists? */ 13292 if ((rack->rc_in_persist != 0) && 13293 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13294 rack->r_ctl.rc_pace_min_segs))) { 13295 rack_exit_persist(tp, rack, cts); 13296 } 13297 /* Do we enter persists? */ 13298 if ((rack->rc_in_persist == 0) && 13299 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13300 TCPS_HAVEESTABLISHED(tp->t_state) && 13301 (tp->snd_max == tp->snd_una) && 13302 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 13303 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 13304 /* 13305 * Here the rwnd is less than 13306 * the pacing size, we are established, 13307 * nothing is outstanding, and there is 13308 * data to send. Enter persists. 
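 * Example (illustrative values): with rc_pace_min_segs = 2896 and
 * rc_high_rwnd = 65535 the threshold is min(65535 / 2, 2896) = 2896
 * bytes, so a peer window smaller than that, with snd_una == snd_max
 * and more than a window's worth of data waiting in the send buffer,
 * puts the connection into persist mode.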
13309 */ 13310 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13311 } 13312 } 13313 13314 static void 13315 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13316 { 13317 13318 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13319 union tcp_log_stackspecific log; 13320 struct timeval ltv; 13321 char tcp_hdr_buf[60]; 13322 struct tcphdr *th; 13323 struct timespec ts; 13324 uint32_t orig_snd_una; 13325 uint8_t xx = 0; 13326 13327 #ifdef NETFLIX_HTTP_LOGGING 13328 struct http_sendfile_track *http_req; 13329 13330 if (SEQ_GT(ae->ack, tp->snd_una)) { 13331 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13332 } else { 13333 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13334 } 13335 #endif 13336 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13337 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 13338 if (rack->rack_no_prr == 0) 13339 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13340 else 13341 log.u_bbr.flex1 = 0; 13342 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13343 log.u_bbr.use_lt_bw <<= 1; 13344 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13345 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13346 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13347 log.u_bbr.pkts_out = tp->t_maxseg; 13348 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13349 log.u_bbr.flex7 = 1; 13350 log.u_bbr.lost = ae->flags; 13351 log.u_bbr.cwnd_gain = ackval; 13352 log.u_bbr.pacing_gain = 0x2; 13353 if (ae->flags & TSTMP_HDWR) { 13354 /* Record the hardware timestamp if present */ 13355 log.u_bbr.flex3 = M_TSTMP; 13356 ts.tv_sec = ae->timestamp / 1000000000; 13357 ts.tv_nsec = ae->timestamp % 1000000000; 13358 ltv.tv_sec = ts.tv_sec; 13359 ltv.tv_usec = ts.tv_nsec / 1000; 13360 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 13361 } else if (ae->flags & TSTMP_LRO) { 13362 /* Record the LRO arrival timestamp */ 13363 log.u_bbr.flex3 = M_TSTMP_LRO; 13364 ts.tv_sec = ae->timestamp / 1000000000; 13365 ts.tv_nsec = ae->timestamp % 1000000000; 13366 ltv.tv_sec = ts.tv_sec; 13367 ltv.tv_usec = ts.tv_nsec / 1000; 13368 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 13369 } 13370 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 13371 /* Log the rcv time */ 13372 log.u_bbr.delRate = ae->timestamp; 13373 #ifdef NETFLIX_HTTP_LOGGING 13374 log.u_bbr.applimited = tp->t_http_closed; 13375 log.u_bbr.applimited <<= 8; 13376 log.u_bbr.applimited |= tp->t_http_open; 13377 log.u_bbr.applimited <<= 8; 13378 log.u_bbr.applimited |= tp->t_http_req; 13379 if (http_req) { 13380 /* Copy out any client req info */ 13381 /* seconds */ 13382 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13383 /* useconds */ 13384 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13385 log.u_bbr.rttProp = http_req->timestamp; 13386 log.u_bbr.cur_del_rate = http_req->start; 13387 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13388 log.u_bbr.flex8 |= 1; 13389 } else { 13390 log.u_bbr.flex8 |= 2; 13391 log.u_bbr.bw_inuse = http_req->end; 13392 } 13393 log.u_bbr.flex6 = http_req->start_seq; 13394 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13395 log.u_bbr.flex8 |= 4; 13396 log.u_bbr.epoch = http_req->end_seq; 13397 } 13398 } 13399 #endif 13400 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13401 th = (struct tcphdr *)tcp_hdr_buf; 13402 th->th_seq = ae->seq; 13403 th->th_ack = ae->ack; 13404 th->th_win = ae->win; 13405 /* Now fill in the ports */ 13406 th->th_sport = tp->t_inpcb->inp_fport; 13407 th->th_dport = tp->t_inpcb->inp_lport; 13408
tcp_set_flags(th, ae->flags); 13409 /* Now do we have a timestamp option? */ 13410 if (ae->flags & HAS_TSTMP) { 13411 u_char *cp; 13412 uint32_t val; 13413 13414 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13415 cp = (u_char *)(th + 1); 13416 *cp = TCPOPT_NOP; 13417 cp++; 13418 *cp = TCPOPT_NOP; 13419 cp++; 13420 *cp = TCPOPT_TIMESTAMP; 13421 cp++; 13422 *cp = TCPOLEN_TIMESTAMP; 13423 cp++; 13424 val = htonl(ae->ts_value); 13425 bcopy((char *)&val, 13426 (char *)cp, sizeof(uint32_t)); 13427 val = htonl(ae->ts_echo); 13428 bcopy((char *)&val, 13429 (char *)(cp + 4), sizeof(uint32_t)); 13430 } else 13431 th->th_off = (sizeof(struct tcphdr) >> 2); 13432 13433 /* 13434 * For sane logging we need to play a little trick. 13435 * If the ack were fully processed we would have moved 13436 * snd_una to high_seq, but since compressed acks are 13437 * processed in two phases, at this point (logging) snd_una 13438 * won't be advanced. So we would see multiple acks showing 13439 * the advancement. We can prevent that by "pretending" that 13440 * snd_una was advanced and then un-advancing it so that the 13441 * logging code has the right value for tlb_snd_una. 13442 */ 13443 if (tp->snd_una != high_seq) { 13444 orig_snd_una = tp->snd_una; 13445 tp->snd_una = high_seq; 13446 xx = 1; 13447 } else 13448 xx = 0; 13449 TCP_LOG_EVENTP(tp, th, 13450 &tp->t_inpcb->inp_socket->so_rcv, 13451 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, 13452 0, &log, true, &ltv); 13453 if (xx) { 13454 tp->snd_una = orig_snd_una; 13455 } 13456 } 13457 13458 } 13459 13460 static void 13461 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 13462 { 13463 uint32_t us_rtt; 13464 /* 13465 * A persist or keep-alive was forced out, update our 13466 * min rtt time. Note we now worry about lost responses. 13467 * When a subsequent keep-alive or persist times out 13468 * and forced_ack is still on, then the last probe 13469 * was not responded to. In such cases we have a 13470 * sysctl that controls the behavior. Either we apply 13471 * the rtt but with reduced confidence (0). Or we just 13472 * plain don't apply the rtt estimate. Having data flow 13473 * will clear the probe_not_answered flag i.e. cum-ack 13474 * move forward <or> exiting and reentering persists. 13475 */ 13476 13477 rack->forced_ack = 0; 13478 rack->rc_tp->t_rxtshift = 0; 13479 if ((rack->rc_in_persist && 13480 (tiwin == rack->rc_tp->snd_wnd)) || 13481 (rack->rc_in_persist == 0)) { 13482 /* 13483 * In persists only apply the RTT update if this is 13484 * a response to our window probe. And that 13485 * means the rwnd sent must match the current 13486 * snd_wnd. If it does not, then we got a 13487 * window update ack instead. For keepalive 13488 * we allow the answer no matter what the window. 13489 * 13490 * Note that if the probe_not_answered is set then 13491 * the forced_ack_ts is the oldest one i.e. the first 13492 * probe sent that might have been lost. This assures 13493 * us that if we do calculate an RTT it is longer, not 13494 * some short thing.
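 * Put differently (illustrative numbers): if the first probe left at
 * forced_ack_ts = T and the answer shows up at us_cts = T + 250000,
 * us_rtt is taken as 250000 usecs even when only a later retransmitted
 * probe was actually answered, so the sample can only err on the long
 * side; it is then applied with confidence 0, or skipped entirely, as
 * the sysctl above selects.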
13495 */ 13496 if (rack->rc_in_persist) 13497 counter_u64_add(rack_persists_acks, 1); 13498 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13499 if (us_rtt == 0) 13500 us_rtt = 1; 13501 if (rack->probe_not_answered == 0) { 13502 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13503 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13504 } else { 13505 /* We have a retransmitted probe here too */ 13506 if (rack_apply_rtt_with_reduced_conf) { 13507 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13508 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13509 } 13510 } 13511 } 13512 } 13513 13514 static int 13515 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13516 { 13517 /* 13518 * Handle a "special" compressed ack mbuf. Each incoming 13519 * ack has only four possible dispositions: 13520 * 13521 * A) It moves the cum-ack forward 13522 * B) It is behind the cum-ack. 13523 * C) It is a window-update ack. 13524 * D) It is a dup-ack. 13525 * 13526 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13527 * in the incoming mbuf. We also need to still pay attention 13528 * to nxt_pkt since there may be another packet after this 13529 * one. 13530 */ 13531 #ifdef TCP_ACCOUNTING 13532 uint64_t ts_val; 13533 uint64_t rdstc; 13534 #endif 13535 int segsiz; 13536 struct timespec ts; 13537 struct tcp_rack *rack; 13538 struct tcp_ackent *ae; 13539 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13540 int cnt, i, did_out, ourfinisacked = 0; 13541 struct tcpopt to_holder, *to = NULL; 13542 #ifdef TCP_ACCOUNTING 13543 int win_up_req = 0; 13544 #endif 13545 int nsegs = 0; 13546 int under_pacing = 1; 13547 int recovery = 0; 13548 int idx; 13549 #ifdef TCP_ACCOUNTING 13550 sched_pin(); 13551 #endif 13552 rack = (struct tcp_rack *)tp->t_fb_ptr; 13553 if (rack->gp_ready && 13554 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13555 under_pacing = 0; 13556 else 13557 under_pacing = 1; 13558 13559 if (rack->r_state != tp->t_state) 13560 rack_set_state(tp, rack); 13561 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13562 (tp->t_flags & TF_GPUTINPROG)) { 13563 /* 13564 * We have a goodput in progress 13565 * and we have entered a late state. 13566 * Do we have enough data in the sb 13567 * to handle the GPUT request? 13568 */ 13569 uint32_t bytes; 13570 13571 bytes = tp->gput_ack - tp->gput_seq; 13572 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13573 bytes += tp->gput_seq - tp->snd_una; 13574 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 13575 /* 13576 * There are not enough bytes in the socket 13577 * buffer that have been sent to cover this 13578 * measurement. Cancel it. 
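 * Illustrative numbers: with gput_seq = 1000, gput_ack = 51000 and
 * snd_una = 500 the measurement still needs (51000 - 1000) +
 * (1000 - 500) = 50500 bytes of send-buffer data; if sbavail() is
 * smaller than that the sample could never complete, so we log the
 * cancellation below and clear TF_GPUTINPROG.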
13579 */ 13580 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13581 rack->r_ctl.rc_gp_srtt /*flex1*/, 13582 tp->gput_seq, 13583 0, 0, 18, __LINE__, NULL, 0); 13584 tp->t_flags &= ~TF_GPUTINPROG; 13585 } 13586 } 13587 to = &to_holder; 13588 to->to_flags = 0; 13589 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13590 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13591 cnt = m->m_len / sizeof(struct tcp_ackent); 13592 idx = cnt / 5; 13593 if (idx >= MAX_NUM_OF_CNTS) 13594 idx = MAX_NUM_OF_CNTS - 1; 13595 counter_u64_add(rack_proc_comp_ack[idx], 1); 13596 counter_u64_add(rack_multi_single_eq, cnt); 13597 high_seq = tp->snd_una; 13598 the_win = tp->snd_wnd; 13599 win_seq = tp->snd_wl1; 13600 win_upd_ack = tp->snd_wl2; 13601 cts = tcp_tv_to_usectick(tv); 13602 ms_cts = tcp_tv_to_mssectick(tv); 13603 segsiz = ctf_fixed_maxseg(tp); 13604 if ((rack->rc_gp_dyn_mul) && 13605 (rack->use_fixed_rate == 0) && 13606 (rack->rc_always_pace)) { 13607 /* Check in on probertt */ 13608 rack_check_probe_rtt(rack, cts); 13609 } 13610 for (i = 0; i < cnt; i++) { 13611 #ifdef TCP_ACCOUNTING 13612 ts_val = get_cyclecount(); 13613 #endif 13614 rack_clear_rate_sample(rack); 13615 ae = ((mtod(m, struct tcp_ackent *)) + i); 13616 /* Setup the window */ 13617 tiwin = ae->win << tp->snd_scale; 13618 /* figure out the type of ack */ 13619 if (SEQ_LT(ae->ack, high_seq)) { 13620 /* Case B*/ 13621 ae->ack_val_set = ACK_BEHIND; 13622 } else if (SEQ_GT(ae->ack, high_seq)) { 13623 /* Case A */ 13624 ae->ack_val_set = ACK_CUMACK; 13625 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13626 /* Case D */ 13627 ae->ack_val_set = ACK_DUPACK; 13628 } else { 13629 /* Case C */ 13630 ae->ack_val_set = ACK_RWND; 13631 } 13632 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13633 /* Validate timestamp */ 13634 if (ae->flags & HAS_TSTMP) { 13635 /* Setup for a timestamp */ 13636 to->to_flags = TOF_TS; 13637 ae->ts_echo -= tp->ts_offset; 13638 to->to_tsecr = ae->ts_echo; 13639 to->to_tsval = ae->ts_value; 13640 /* 13641 * If echoed timestamp is later than the current time, fall back to 13642 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13643 * were used when this connection was established. 13644 */ 13645 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13646 to->to_tsecr = 0; 13647 if (tp->ts_recent && 13648 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13649 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13650 #ifdef TCP_ACCOUNTING 13651 rdstc = get_cyclecount(); 13652 if (rdstc > ts_val) { 13653 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13654 (rdstc - ts_val)); 13655 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13656 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13657 } 13658 } 13659 #endif 13660 continue; 13661 } 13662 } 13663 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13664 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13665 tp->ts_recent_age = tcp_ts_getticks(); 13666 tp->ts_recent = ae->ts_value; 13667 } 13668 } else { 13669 /* Setup for a no options */ 13670 to->to_flags = 0; 13671 } 13672 /* Update the rcv time and perform idle reduction possibly */ 13673 if (tp->t_idle_reduce && 13674 (tp->snd_max == tp->snd_una) && 13675 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13676 counter_u64_add(rack_input_idle_reduces, 1); 13677 rack_cc_after_idle(rack, tp); 13678 } 13679 tp->t_rcvtime = ticks; 13680 /* Now what about ECN? 
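 * (tcp_ecn_input_segment() examines the ack entry's flags and IP
 * codepoint and returns non-zero when a congestion response is
 * called for, which we turn into rack_cong_signal(CC_ECN) just as
 * the non-compressed input path does.)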
*/ 13681 if (tcp_ecn_input_segment(tp, ae->flags, ae->codepoint)) 13682 rack_cong_signal(tp, CC_ECN, ae->ack); 13683 #ifdef TCP_ACCOUNTING 13684 /* Count for the specific type of ack in */ 13685 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13686 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13687 tp->tcp_cnt_counters[ae->ack_val_set]++; 13688 } 13689 #endif 13690 /* 13691 * Note how we could move up these in the determination 13692 * above, but we don't so that way the timestamp checks (and ECN) 13693 * is done first before we do any processing on the ACK. 13694 * The non-compressed path through the code has this 13695 * weakness (noted by @jtl) that it actually does some 13696 * processing before verifying the timestamp information. 13697 * We don't take that path here which is why we set 13698 * the ack_val_set first, do the timestamp and ecn 13699 * processing, and then look at what we have setup. 13700 */ 13701 if (ae->ack_val_set == ACK_BEHIND) { 13702 /* 13703 * Case B flag reordering, if window is not closed 13704 * or it could be a keep-alive or persists 13705 */ 13706 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13707 counter_u64_add(rack_reorder_seen, 1); 13708 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13709 } 13710 } else if (ae->ack_val_set == ACK_DUPACK) { 13711 /* Case D */ 13712 rack_strike_dupack(rack); 13713 } else if (ae->ack_val_set == ACK_RWND) { 13714 /* Case C */ 13715 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13716 ts.tv_sec = ae->timestamp / 1000000000; 13717 ts.tv_nsec = ae->timestamp % 1000000000; 13718 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13719 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13720 } else { 13721 rack->r_ctl.act_rcv_time = *tv; 13722 } 13723 if (rack->forced_ack) { 13724 rack_handle_probe_response(rack, tiwin, 13725 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13726 } 13727 #ifdef TCP_ACCOUNTING 13728 win_up_req = 1; 13729 #endif 13730 win_upd_ack = ae->ack; 13731 win_seq = ae->seq; 13732 the_win = tiwin; 13733 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13734 } else { 13735 /* Case A */ 13736 if (SEQ_GT(ae->ack, tp->snd_max)) { 13737 /* 13738 * We just send an ack since the incoming 13739 * ack is beyond the largest seq we sent. 
13740 */ 13741 if ((tp->t_flags & TF_ACKNOW) == 0) { 13742 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13743 if (tp->t_flags & TF_ACKNOW) 13744 rack->r_wanted_output = 1; 13745 } 13746 } else { 13747 nsegs++; 13748 /* If the window changed setup to update */ 13749 if (tiwin != tp->snd_wnd) { 13750 win_upd_ack = ae->ack; 13751 win_seq = ae->seq; 13752 the_win = tiwin; 13753 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13754 } 13755 #ifdef TCP_ACCOUNTING 13756 /* Account for the acks */ 13757 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13758 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13759 } 13760 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13761 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13762 #endif 13763 high_seq = ae->ack; 13764 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 13765 union tcp_log_stackspecific log; 13766 struct timeval tv; 13767 13768 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13769 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13770 log.u_bbr.flex1 = high_seq; 13771 log.u_bbr.flex2 = rack->r_ctl.roundends; 13772 log.u_bbr.flex3 = rack->r_ctl.current_round; 13773 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 13774 log.u_bbr.flex8 = 8; 13775 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 13776 0, &log, false, NULL, NULL, 0, &tv); 13777 } 13778 /* 13779 * The draft (v3) calls for us to use SEQ_GEQ, but that 13780 * causes issues when we are just going app limited. Lets 13781 * instead use SEQ_GT <or> where its equal but more data 13782 * is outstanding. 13783 */ 13784 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) || 13785 ((high_seq == rack->r_ctl.roundends) && 13786 SEQ_GT(tp->snd_max, tp->snd_una))) { 13787 rack->r_ctl.current_round++; 13788 rack->r_ctl.roundends = tp->snd_max; 13789 if (CC_ALGO(tp)->newround != NULL) { 13790 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 13791 } 13792 } 13793 /* Setup our act_rcv_time */ 13794 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13795 ts.tv_sec = ae->timestamp / 1000000000; 13796 ts.tv_nsec = ae->timestamp % 1000000000; 13797 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13798 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13799 } else { 13800 rack->r_ctl.act_rcv_time = *tv; 13801 } 13802 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13803 if (rack->rc_dsack_round_seen) { 13804 /* Is the dsack round over? */ 13805 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13806 /* Yes it is */ 13807 rack->rc_dsack_round_seen = 0; 13808 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13809 } 13810 } 13811 } 13812 } 13813 /* And lets be sure to commit the rtt measurements for this ack */ 13814 tcp_rack_xmit_timer_commit(rack, tp); 13815 #ifdef TCP_ACCOUNTING 13816 rdstc = get_cyclecount(); 13817 if (rdstc > ts_val) { 13818 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13819 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13820 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13821 if (ae->ack_val_set == ACK_CUMACK) 13822 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13823 } 13824 } 13825 #endif 13826 } 13827 #ifdef TCP_ACCOUNTING 13828 ts_val = get_cyclecount(); 13829 #endif 13830 acked_amount = acked = (high_seq - tp->snd_una); 13831 if (acked) { 13832 /* 13833 * Clear the probe not answered flag 13834 * since cum-ack moved forward.
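 * The block below also maintains the ack bookkeeping used by the
 * sack-attack detection: for example a cum-ack advance of 4 * segsiz
 * credits 4 to r_ctl.ack_count, and once ack_count grows past
 * 0xfff00000 both ack_count and sack_count are halved so the ratio
 * is preserved without overflowing a uint32_t.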
13835 */ 13836 rack->probe_not_answered = 0; 13837 if (rack->sack_attack_disable == 0) 13838 rack_do_decay(rack); 13839 if (acked >= segsiz) { 13840 /* 13841 * You only get credit for 13842 * MSS and greater (and you get extra 13843 * credit for larger cum-ack moves). 13844 */ 13845 int ac; 13846 13847 ac = acked / segsiz; 13848 rack->r_ctl.ack_count += ac; 13849 counter_u64_add(rack_ack_total, ac); 13850 } 13851 if (rack->r_ctl.ack_count > 0xfff00000) { 13852 /* 13853 * reduce the number to keep us under 13854 * a uint32_t. 13855 */ 13856 rack->r_ctl.ack_count /= 2; 13857 rack->r_ctl.sack_count /= 2; 13858 } 13859 if (tp->t_flags & TF_NEEDSYN) { 13860 /* 13861 * T/TCP: Connection was half-synchronized, and our SYN has 13862 * been ACK'd (so connection is now fully synchronized). Go 13863 * to non-starred state, increment snd_una for ACK of SYN, 13864 * and check if we can do window scaling. 13865 */ 13866 tp->t_flags &= ~TF_NEEDSYN; 13867 tp->snd_una++; 13868 acked_amount = acked = (high_seq - tp->snd_una); 13869 } 13870 if (acked > sbavail(&so->so_snd)) 13871 acked_amount = sbavail(&so->so_snd); 13872 #ifdef NETFLIX_EXP_DETECTION 13873 /* 13874 * We only care on a cum-ack move if we are in a sack-disabled 13875 * state. We have already added in to the ack_count, and we never 13876 * would disable on a cum-ack move, so we only care to do the 13877 * detection if it may "undo" it, i.e. we were in disabled already. 13878 */ 13879 if (rack->sack_attack_disable) 13880 rack_do_detection(tp, rack, acked_amount, segsiz); 13881 #endif 13882 if (IN_FASTRECOVERY(tp->t_flags) && 13883 (rack->rack_no_prr == 0)) 13884 rack_update_prr(tp, rack, acked_amount, high_seq); 13885 if (IN_RECOVERY(tp->t_flags)) { 13886 if (SEQ_LT(high_seq, tp->snd_recover) && 13887 (SEQ_LT(high_seq, tp->snd_max))) { 13888 tcp_rack_partialack(tp); 13889 } else { 13890 rack_post_recovery(tp, high_seq); 13891 recovery = 1; 13892 } 13893 } 13894 /* Handle the rack-log-ack part (sendmap) */ 13895 if ((sbused(&so->so_snd) == 0) && 13896 (acked > acked_amount) && 13897 (tp->t_state >= TCPS_FIN_WAIT_1) && 13898 (tp->t_flags & TF_SENTFIN)) { 13899 /* 13900 * We must be sure our fin 13901 * was sent and acked (we can be 13902 * in FIN_WAIT_1 without having 13903 * sent the fin). 13904 */ 13905 ourfinisacked = 1; 13906 /* 13907 * Lets make sure snd_una is updated 13908 * since most likely acked_amount = 0 (it 13909 * should be). 13910 */ 13911 tp->snd_una = high_seq; 13912 } 13913 /* Did we make a RTO error? 
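 * That is, did an RTO fire for data whose ack was merely delayed?
 * With no timestamps negotiated to disambiguate, if TF_PREVVALID is
 * still set, this is the first rxt shift and the ack arrived while
 * ticks is still inside t_badrxtwin, we treat the RTO as spurious
 * and signal CC_RTO_ERR so the congestion state saved at the RTO
 * can be restored.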
*/ 13914 if ((tp->t_flags & TF_PREVVALID) && 13915 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13916 tp->t_flags &= ~TF_PREVVALID; 13917 if (tp->t_rxtshift == 1 && 13918 (int)(ticks - tp->t_badrxtwin) < 0) 13919 rack_cong_signal(tp, CC_RTO_ERR, high_seq); 13920 } 13921 /* Handle the data in the socket buffer */ 13922 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13923 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13924 if (acked_amount > 0) { 13925 struct mbuf *mfree; 13926 13927 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13928 SOCKBUF_LOCK(&so->so_snd); 13929 mfree = sbcut_locked(&so->so_snd, acked_amount); 13930 tp->snd_una = high_seq; 13931 /* Note we want to hold the sb lock through the sendmap adjust */ 13932 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13933 /* Wake up the socket if we have room to write more */ 13934 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13935 sowwakeup_locked(so); 13936 m_freem(mfree); 13937 } 13938 /* update progress */ 13939 tp->t_acktime = ticks; 13940 rack_log_progress_event(rack, tp, tp->t_acktime, 13941 PROGRESS_UPDATE, __LINE__); 13942 /* Clear out shifts and such */ 13943 tp->t_rxtshift = 0; 13944 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13945 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13946 rack->rc_tlp_in_progress = 0; 13947 rack->r_ctl.rc_tlp_cnt_out = 0; 13948 /* Send recover and snd_nxt must be dragged along */ 13949 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13950 tp->snd_recover = tp->snd_una; 13951 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13952 tp->snd_nxt = tp->snd_una; 13953 /* 13954 * If the RXT timer is running we want to 13955 * stop it, so we can restart a TLP (or new RXT). 13956 */ 13957 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13958 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13959 #ifdef NETFLIX_HTTP_LOGGING 13960 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13961 #endif 13962 tp->snd_wl2 = high_seq; 13963 tp->t_dupacks = 0; 13964 if (under_pacing && 13965 (rack->use_fixed_rate == 0) && 13966 (rack->in_probe_rtt == 0) && 13967 rack->rc_gp_dyn_mul && 13968 rack->rc_always_pace) { 13969 /* Check if we are dragging bottom */ 13970 rack_check_bottom_drag(tp, rack, so, acked); 13971 } 13972 if (tp->snd_una == tp->snd_max) { 13973 tp->t_flags &= ~TF_PREVVALID; 13974 rack->r_ctl.retran_during_recovery = 0; 13975 rack->r_ctl.dsack_byte_cnt = 0; 13976 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13977 if (rack->r_ctl.rc_went_idle_time == 0) 13978 rack->r_ctl.rc_went_idle_time = 1; 13979 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13980 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13981 tp->t_acktime = 0; 13982 /* Set so we might enter persists... */ 13983 rack->r_wanted_output = 1; 13984 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13985 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13986 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13987 (sbavail(&so->so_snd) == 0) && 13988 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13989 /* 13990 * The socket was gone and the 13991 * peer sent data (not now in the past), time to 13992 * reset him. 
13993 */ 13994 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13995 /* tcp_close will kill the inp pre-log the Reset */ 13996 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13997 #ifdef TCP_ACCOUNTING 13998 rdstc = get_cyclecount(); 13999 if (rdstc > ts_val) { 14000 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 14001 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14002 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14003 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14004 } 14005 } 14006 #endif 14007 m_freem(m); 14008 tp = tcp_close(tp); 14009 if (tp == NULL) { 14010 #ifdef TCP_ACCOUNTING 14011 sched_unpin(); 14012 #endif 14013 return (1); 14014 } 14015 /* 14016 * We would normally do drop-with-reset which would 14017 * send back a reset. We can't since we don't have 14018 * all the needed bits. Instead lets arrange for 14019 * a call to tcp_output(). That way since we 14020 * are in the closed state we will generate a reset. 14021 * 14022 * Note if tcp_accounting is on we don't unpin since 14023 * we do that after the goto label. 14024 */ 14025 goto send_out_a_rst; 14026 } 14027 if ((sbused(&so->so_snd) == 0) && 14028 (tp->t_state >= TCPS_FIN_WAIT_1) && 14029 (tp->t_flags & TF_SENTFIN)) { 14030 /* 14031 * If we can't receive any more data, then closing user can 14032 * proceed. Starting the timer is contrary to the 14033 * specification, but if we don't get a FIN we'll hang 14034 * forever. 14035 * 14036 */ 14037 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 14038 soisdisconnected(so); 14039 tcp_timer_activate(tp, TT_2MSL, 14040 (tcp_fast_finwait2_recycle ? 14041 tcp_finwait2_timeout : 14042 TP_MAXIDLE(tp))); 14043 } 14044 if (ourfinisacked == 0) { 14045 /* 14046 * We don't change to fin-wait-2 if we have our fin acked 14047 * which means we are probably in TCPS_CLOSING. 14048 */ 14049 tcp_state_change(tp, TCPS_FIN_WAIT_2); 14050 } 14051 } 14052 } 14053 /* Wake up the socket if we have room to write more */ 14054 if (sbavail(&so->so_snd)) { 14055 rack->r_wanted_output = 1; 14056 if (ctf_progress_timeout_check(tp, true)) { 14057 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 14058 tp, tick, PROGRESS_DROP, __LINE__); 14059 /* 14060 * We cheat here and don't send a RST, we should send one 14061 * when the pacer drops the connection. 
14062 */ 14063 #ifdef TCP_ACCOUNTING 14064 rdstc = get_cyclecount(); 14065 if (rdstc > ts_val) { 14066 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 14067 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14068 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14069 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14070 } 14071 } 14072 sched_unpin(); 14073 #endif 14074 (void)tcp_drop(tp, ETIMEDOUT); 14075 m_freem(m); 14076 return (1); 14077 } 14078 } 14079 if (ourfinisacked) { 14080 switch(tp->t_state) { 14081 case TCPS_CLOSING: 14082 #ifdef TCP_ACCOUNTING 14083 rdstc = get_cyclecount(); 14084 if (rdstc > ts_val) { 14085 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 14086 (rdstc - ts_val)); 14087 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14088 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14089 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14090 } 14091 } 14092 sched_unpin(); 14093 #endif 14094 tcp_twstart(tp); 14095 m_freem(m); 14096 return (1); 14097 break; 14098 case TCPS_LAST_ACK: 14099 #ifdef TCP_ACCOUNTING 14100 rdstc = get_cyclecount(); 14101 if (rdstc > ts_val) { 14102 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 14103 (rdstc - ts_val)); 14104 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14105 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14106 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14107 } 14108 } 14109 sched_unpin(); 14110 #endif 14111 tp = tcp_close(tp); 14112 ctf_do_drop(m, tp); 14113 return (1); 14114 break; 14115 case TCPS_FIN_WAIT_1: 14116 #ifdef TCP_ACCOUNTING 14117 rdstc = get_cyclecount(); 14118 if (rdstc > ts_val) { 14119 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 14120 (rdstc - ts_val)); 14121 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14122 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14123 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14124 } 14125 } 14126 #endif 14127 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 14128 soisdisconnected(so); 14129 tcp_timer_activate(tp, TT_2MSL, 14130 (tcp_fast_finwait2_recycle ? 14131 tcp_finwait2_timeout : 14132 TP_MAXIDLE(tp))); 14133 } 14134 tcp_state_change(tp, TCPS_FIN_WAIT_2); 14135 break; 14136 default: 14137 break; 14138 } 14139 } 14140 if (rack->r_fast_output) { 14141 /* 14142 * We re doing fast output.. can we expand that? 
14143 */ 14144 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 14145 } 14146 #ifdef TCP_ACCOUNTING 14147 rdstc = get_cyclecount(); 14148 if (rdstc > ts_val) { 14149 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 14150 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14151 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14152 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14153 } 14154 } 14155 14156 } else if (win_up_req) { 14157 rdstc = get_cyclecount(); 14158 if (rdstc > ts_val) { 14159 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 14160 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14161 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 14162 } 14163 } 14164 #endif 14165 } 14166 /* Now is there a next packet, if so we are done */ 14167 m_freem(m); 14168 did_out = 0; 14169 if (nxt_pkt) { 14170 #ifdef TCP_ACCOUNTING 14171 sched_unpin(); 14172 #endif 14173 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 14174 return (0); 14175 } 14176 rack_handle_might_revert(tp, rack); 14177 ctf_calc_rwin(so, tp); 14178 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14179 send_out_a_rst: 14180 if (tcp_output(tp) < 0) { 14181 #ifdef TCP_ACCOUNTING 14182 sched_unpin(); 14183 #endif 14184 return (1); 14185 } 14186 did_out = 1; 14187 } 14188 rack_free_trim(rack); 14189 #ifdef TCP_ACCOUNTING 14190 sched_unpin(); 14191 #endif 14192 rack_timer_audit(tp, rack, &so->so_snd); 14193 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 14194 return (0); 14195 } 14196 14197 14198 static int 14199 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 14200 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 14201 int32_t nxt_pkt, struct timeval *tv) 14202 { 14203 #ifdef TCP_ACCOUNTING 14204 uint64_t ts_val; 14205 #endif 14206 int32_t thflags, retval, did_out = 0; 14207 int32_t way_out = 0; 14208 /* 14209 * cts - is the current time from tv (caller gets ts) in microseconds. 14210 * ms_cts - is the current time from tv in milliseconds. 14211 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 14212 */ 14213 uint32_t cts, us_cts, ms_cts; 14214 uint32_t tiwin, high_seq; 14215 struct timespec ts; 14216 struct tcpopt to; 14217 struct tcp_rack *rack; 14218 struct rack_sendmap *rsm; 14219 int32_t prev_state = 0; 14220 #ifdef TCP_ACCOUNTING 14221 int ack_val_set = 0xf; 14222 #endif 14223 int nsegs; 14224 /* 14225 * tv passed from common code is from either M_TSTMP_LRO or 14226 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 14227 */ 14228 rack = (struct tcp_rack *)tp->t_fb_ptr; 14229 if (m->m_flags & M_ACKCMP) { 14230 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 14231 } 14232 if (m->m_flags & M_ACKCMP) { 14233 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 14234 } 14235 cts = tcp_tv_to_usectick(tv); 14236 ms_cts = tcp_tv_to_mssectick(tv); 14237 nsegs = m->m_pkthdr.lro_nsegs; 14238 counter_u64_add(rack_proc_non_comp_ack, 1); 14239 thflags = tcp_get_flags(th); 14240 #ifdef TCP_ACCOUNTING 14241 sched_pin(); 14242 if (thflags & TH_ACK) 14243 ts_val = get_cyclecount(); 14244 #endif 14245 if ((m->m_flags & M_TSTMP) || 14246 (m->m_flags & M_TSTMP_LRO)) { 14247 mbuf_tstmp2timespec(m, &ts); 14248 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14249 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14250 } else 14251 rack->r_ctl.act_rcv_time = *tv; 14252 kern_prefetch(rack, &prev_state); 14253 prev_state = 0; 14254 /* 14255 * Unscale the window into a 32-bit value. For the SYN_SENT state 14256 * the scale is zero. 14257 */ 14258 tiwin = th->th_win << tp->snd_scale; 14259 #ifdef TCP_ACCOUNTING 14260 if (thflags & TH_ACK) { 14261 /* 14262 * We have a tradeoff here. We can either do what we are 14263 * doing i.e. pinning to this CPU and then doing the accounting 14264 * <or> we could do a critical enter, setup the rdtsc and cpu 14265 * as in below, and then validate we are on the same CPU on 14266 * exit. I have choosen to not do the critical enter since 14267 * that often will gain you a context switch, and instead lock 14268 * us (line above this if) to the same CPU with sched_pin(). This 14269 * means we may be context switched out for a higher priority 14270 * interupt but we won't be moved to another CPU. 14271 * 14272 * If this occurs (which it won't very often since we most likely 14273 * are running this code in interupt context and only a higher 14274 * priority will bump us ... clock?) we will falsely add in 14275 * to the time the interupt processing time plus the ack processing 14276 * time. This is ok since its a rare event. 14277 */ 14278 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14279 ctf_fixed_maxseg(tp)); 14280 } 14281 #endif 14282 /* 14283 * Parse options on any incoming segment. 14284 */ 14285 memset(&to, 0, sizeof(to)); 14286 tcp_dooptions(&to, (u_char *)(th + 1), 14287 (th->th_off << 2) - sizeof(struct tcphdr), 14288 (thflags & TH_SYN) ? TO_SYN : 0); 14289 NET_EPOCH_ASSERT(); 14290 INP_WLOCK_ASSERT(tp->t_inpcb); 14291 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14292 __func__)); 14293 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14294 __func__)); 14295 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14296 (tp->t_flags & TF_GPUTINPROG)) { 14297 /* 14298 * We have a goodput in progress 14299 * and we have entered a late state. 14300 * Do we have enough data in the sb 14301 * to handle the GPUT request? 14302 */ 14303 uint32_t bytes; 14304 14305 bytes = tp->gput_ack - tp->gput_seq; 14306 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14307 bytes += tp->gput_seq - tp->snd_una; 14308 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 14309 /* 14310 * There are not enough bytes in the socket 14311 * buffer that have been sent to cover this 14312 * measurement. Cancel it. 
14313 */ 14314 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14315 rack->r_ctl.rc_gp_srtt /*flex1*/, 14316 tp->gput_seq, 14317 0, 0, 18, __LINE__, NULL, 0); 14318 tp->t_flags &= ~TF_GPUTINPROG; 14319 } 14320 } 14321 high_seq = th->th_ack; 14322 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14323 union tcp_log_stackspecific log; 14324 struct timeval ltv; 14325 #ifdef NETFLIX_HTTP_LOGGING 14326 struct http_sendfile_track *http_req; 14327 14328 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14329 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14330 } else { 14331 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14332 } 14333 #endif 14334 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14335 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 14336 if (rack->rack_no_prr == 0) 14337 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14338 else 14339 log.u_bbr.flex1 = 0; 14340 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14341 log.u_bbr.use_lt_bw <<= 1; 14342 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14343 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14344 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14345 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14346 log.u_bbr.flex3 = m->m_flags; 14347 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14348 log.u_bbr.lost = thflags; 14349 log.u_bbr.pacing_gain = 0x1; 14350 #ifdef TCP_ACCOUNTING 14351 log.u_bbr.cwnd_gain = ack_val_set; 14352 #endif 14353 log.u_bbr.flex7 = 2; 14354 if (m->m_flags & M_TSTMP) { 14355 /* Record the hardware timestamp if present */ 14356 mbuf_tstmp2timespec(m, &ts); 14357 ltv.tv_sec = ts.tv_sec; 14358 ltv.tv_usec = ts.tv_nsec / 1000; 14359 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 14360 } else if (m->m_flags & M_TSTMP_LRO) { 14361 /* Record the LRO arrival timestamp */ 14362 mbuf_tstmp2timespec(m, &ts); 14363 ltv.tv_sec = ts.tv_sec; 14364 ltv.tv_usec = ts.tv_nsec / 1000; 14365 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 14366 } 14367 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 14368 /* Log the rcv time */ 14369 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14370 #ifdef NETFLIX_HTTP_LOGGING 14371 log.u_bbr.applimited = tp->t_http_closed; 14372 log.u_bbr.applimited <<= 8; 14373 log.u_bbr.applimited |= tp->t_http_open; 14374 log.u_bbr.applimited <<= 8; 14375 log.u_bbr.applimited |= tp->t_http_req; 14376 if (http_req) { 14377 /* Copy out any client req info */ 14378 /* seconds */ 14379 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14380 /* useconds */ 14381 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14382 log.u_bbr.rttProp = http_req->timestamp; 14383 log.u_bbr.cur_del_rate = http_req->start; 14384 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14385 log.u_bbr.flex8 |= 1; 14386 } else { 14387 log.u_bbr.flex8 |= 2; 14388 log.u_bbr.bw_inuse = http_req->end; 14389 } 14390 log.u_bbr.flex6 = http_req->start_seq; 14391 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14392 log.u_bbr.flex8 |= 4; 14393 log.u_bbr.epoch = http_req->end_seq; 14394 } 14395 } 14396 #endif 14397 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14398 tlen, &log, true, &ltv); 14399 } 14400 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14401 way_out = 4; 14402 retval = 0; 14403 m_freem(m); 14404 goto done_with_input; 14405 } 14406 /* 14407 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14408 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
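 * Concretely, an acceptable ack must be above our ISS and no higher
 * than snd_max; anything with th_ack <= iss or th_ack > snd_max is
 * answered below with a reset via ctf_do_dropwithreset().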
14409 */ 14410 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14411 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14412 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14413 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14414 #ifdef TCP_ACCOUNTING 14415 sched_unpin(); 14416 #endif 14417 return (1); 14418 } 14419 /* 14420 * If timestamps were negotiated during SYN/ACK and a 14421 * segment without a timestamp is received, silently drop 14422 * the segment, unless it is a RST segment or missing timestamps are 14423 * tolerated. 14424 * See section 3.2 of RFC 7323. 14425 */ 14426 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14427 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14428 way_out = 5; 14429 retval = 0; 14430 m_freem(m); 14431 goto done_with_input; 14432 } 14433 14434 /* 14435 * Segment received on connection. Reset idle time and keep-alive 14436 * timer. XXX: This should be done after segment validation to 14437 * ignore broken/spoofed segs. 14438 */ 14439 if (tp->t_idle_reduce && 14440 (tp->snd_max == tp->snd_una) && 14441 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14442 counter_u64_add(rack_input_idle_reduces, 1); 14443 rack_cc_after_idle(rack, tp); 14444 } 14445 tp->t_rcvtime = ticks; 14446 #ifdef STATS 14447 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14448 #endif 14449 if (tiwin > rack->r_ctl.rc_high_rwnd) 14450 rack->r_ctl.rc_high_rwnd = tiwin; 14451 /* 14452 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14453 * this to occur after we've validated the segment. 14454 */ 14455 if (tcp_ecn_input_segment(tp, thflags, iptos)) 14456 rack_cong_signal(tp, CC_ECN, th->th_ack); 14457 14458 /* 14459 * If echoed timestamp is later than the current time, fall back to 14460 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14461 * were used when this connection was established. 14462 */ 14463 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14464 to.to_tsecr -= tp->ts_offset; 14465 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14466 to.to_tsecr = 0; 14467 } 14468 14469 /* 14470 * If its the first time in we need to take care of options and 14471 * verify we can do SACK for rack! 14472 */ 14473 if (rack->r_state == 0) { 14474 /* Should be init'd by rack_init() */ 14475 KASSERT(rack->rc_inp != NULL, 14476 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14477 if (rack->rc_inp == NULL) { 14478 rack->rc_inp = tp->t_inpcb; 14479 } 14480 14481 /* 14482 * Process options only when we get SYN/ACK back. The SYN 14483 * case for incoming connections is handled in tcp_syncache. 14484 * According to RFC1323 the window field in a SYN (i.e., a 14485 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14486 * this is traditional behavior, may need to be cleaned up. 14487 */ 14488 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14489 /* Handle parallel SYN for ECN */ 14490 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14491 if ((to.to_flags & TOF_SCALE) && 14492 (tp->t_flags & TF_REQ_SCALE)) { 14493 tp->t_flags |= TF_RCVD_SCALE; 14494 tp->snd_scale = to.to_wscale; 14495 } else 14496 tp->t_flags &= ~TF_REQ_SCALE; 14497 /* 14498 * Initial send window. It will be updated with the 14499 * next incoming segment to the scaled value. 
14500 */ 14501 tp->snd_wnd = th->th_win; 14502 rack_validate_fo_sendwin_up(tp, rack); 14503 if ((to.to_flags & TOF_TS) && 14504 (tp->t_flags & TF_REQ_TSTMP)) { 14505 tp->t_flags |= TF_RCVD_TSTMP; 14506 tp->ts_recent = to.to_tsval; 14507 tp->ts_recent_age = cts; 14508 } else 14509 tp->t_flags &= ~TF_REQ_TSTMP; 14510 if (to.to_flags & TOF_MSS) { 14511 tcp_mss(tp, to.to_mss); 14512 } 14513 if ((tp->t_flags & TF_SACK_PERMIT) && 14514 (to.to_flags & TOF_SACKPERM) == 0) 14515 tp->t_flags &= ~TF_SACK_PERMIT; 14516 if (IS_FASTOPEN(tp->t_flags)) { 14517 if (to.to_flags & TOF_FASTOPEN) { 14518 uint16_t mss; 14519 14520 if (to.to_flags & TOF_MSS) 14521 mss = to.to_mss; 14522 else 14523 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 14524 mss = TCP6_MSS; 14525 else 14526 mss = TCP_MSS; 14527 tcp_fastopen_update_cache(tp, mss, 14528 to.to_tfo_len, to.to_tfo_cookie); 14529 } else 14530 tcp_fastopen_disable_path(tp); 14531 } 14532 } 14533 /* 14534 * At this point we are at the initial call. Here we decide 14535 * if we are doing RACK or not. We do this by seeing if 14536 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14537 * The code now does do dup-ack counting so if you don't 14538 * switch back you won't get rack & TLP, but you will still 14539 * get this stack. 14540 */ 14541 14542 if ((rack_sack_not_required == 0) && 14543 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14544 tcp_switch_back_to_default(tp); 14545 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14546 tlen, iptos); 14547 #ifdef TCP_ACCOUNTING 14548 sched_unpin(); 14549 #endif 14550 return (1); 14551 } 14552 tcp_set_hpts(tp->t_inpcb); 14553 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14554 } 14555 if (thflags & TH_FIN) 14556 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14557 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14558 if ((rack->rc_gp_dyn_mul) && 14559 (rack->use_fixed_rate == 0) && 14560 (rack->rc_always_pace)) { 14561 /* Check in on probertt */ 14562 rack_check_probe_rtt(rack, us_cts); 14563 } 14564 rack_clear_rate_sample(rack); 14565 if ((rack->forced_ack) && 14566 ((tcp_get_flags(th) & TH_RST) == 0)) { 14567 rack_handle_probe_response(rack, tiwin, us_cts); 14568 } 14569 /* 14570 * This is the one exception case where we set the rack state 14571 * always. All other times (timers etc) we must have a rack-state 14572 * set (so we assure we have done the checks above for SACK). 14573 */ 14574 rack->r_ctl.rc_rcvtime = cts; 14575 if (rack->r_state != tp->t_state) 14576 rack_set_state(tp, rack); 14577 if (SEQ_GT(th->th_ack, tp->snd_una) && 14578 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14579 kern_prefetch(rsm, &prev_state); 14580 prev_state = rack->r_state; 14581 retval = (*rack->r_substate) (m, th, so, 14582 tp, &to, drop_hdrlen, 14583 tlen, tiwin, thflags, nxt_pkt, iptos); 14584 #ifdef INVARIANTS 14585 if ((retval == 0) && 14586 (tp->t_inpcb == NULL)) { 14587 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 14588 retval, tp, prev_state); 14589 } 14590 #endif 14591 if (retval == 0) { 14592 /* 14593 * If retval is 1 the tcb is unlocked and most likely the tp 14594 * is gone. 14595 */ 14596 INP_WLOCK_ASSERT(tp->t_inpcb); 14597 if ((rack->rc_gp_dyn_mul) && 14598 (rack->rc_always_pace) && 14599 (rack->use_fixed_rate == 0) && 14600 rack->in_probe_rtt && 14601 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14602 /* 14603 * If we are going for target, lets recheck before 14604 * we output. 
14605 */ 14606 rack_check_probe_rtt(rack, us_cts); 14607 } 14608 if (rack->set_pacing_done_a_iw == 0) { 14609 /* How much has been acked? */ 14610 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14611 /* We have enough to set in the pacing segment size */ 14612 rack->set_pacing_done_a_iw = 1; 14613 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14614 } 14615 } 14616 tcp_rack_xmit_timer_commit(rack, tp); 14617 #ifdef TCP_ACCOUNTING 14618 /* 14619 * If we set the ack_val_se to what ack processing we are doing 14620 * we also want to track how many cycles we burned. Note 14621 * the bits after tcp_output we let be "free". This is because 14622 * we are also tracking the tcp_output times as well. Note the 14623 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14624 * 0xf cannot be returned and is what we initialize it too to 14625 * indicate we are not doing the tabulations. 14626 */ 14627 if (ack_val_set != 0xf) { 14628 uint64_t crtsc; 14629 14630 crtsc = get_cyclecount(); 14631 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14632 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14633 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14634 } 14635 } 14636 #endif 14637 if (nxt_pkt == 0) { 14638 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14639 do_output_now: 14640 if (tcp_output(tp) < 0) 14641 return (1); 14642 did_out = 1; 14643 } 14644 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14645 rack_free_trim(rack); 14646 } 14647 /* Update any rounds needed */ 14648 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 14649 union tcp_log_stackspecific log; 14650 struct timeval tv; 14651 14652 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14653 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14654 log.u_bbr.flex1 = high_seq; 14655 log.u_bbr.flex2 = rack->r_ctl.roundends; 14656 log.u_bbr.flex3 = rack->r_ctl.current_round; 14657 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14658 log.u_bbr.flex8 = 9; 14659 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14660 0, &log, false, NULL, NULL, 0, &tv); 14661 } 14662 /* 14663 * The draft (v3) calls for us to use SEQ_GEQ, but that 14664 * causes issues when we are just going app limited. Lets 14665 * instead use SEQ_GT <or> where its equal but more data 14666 * is outstanding. 14667 */ 14668 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14669 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14670 rack->r_ctl.current_round++; 14671 rack->r_ctl.roundends = tp->snd_max; 14672 if (CC_ALGO(tp)->newround != NULL) { 14673 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 14674 } 14675 } 14676 if ((nxt_pkt == 0) && 14677 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14678 (SEQ_GT(tp->snd_max, tp->snd_una) || 14679 (tp->t_flags & TF_DELACK) || 14680 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14681 (tp->t_state <= TCPS_CLOSING)))) { 14682 /* We could not send (probably in the hpts but stopped the timer earlier)? 
*/ 14683 if ((tp->snd_max == tp->snd_una) && 14684 ((tp->t_flags & TF_DELACK) == 0) && 14685 (tcp_in_hpts(rack->rc_inp)) && 14686 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14687 /* keep alive not needed if we are hptsi output yet */ 14688 ; 14689 } else { 14690 int late = 0; 14691 if (tcp_in_hpts(rack->rc_inp)) { 14692 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14693 us_cts = tcp_get_usecs(NULL); 14694 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14695 rack->r_early = 1; 14696 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14697 } else 14698 late = 1; 14699 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14700 } 14701 tcp_hpts_remove(tp->t_inpcb); 14702 } 14703 if (late && (did_out == 0)) { 14704 /* 14705 * We are late in the sending 14706 * and we did not call the output 14707 * (this probably should not happen). 14708 */ 14709 goto do_output_now; 14710 } 14711 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14712 } 14713 way_out = 1; 14714 } else if (nxt_pkt == 0) { 14715 /* Do we have the correct timer running? */ 14716 rack_timer_audit(tp, rack, &so->so_snd); 14717 way_out = 2; 14718 } 14719 done_with_input: 14720 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14721 if (did_out) 14722 rack->r_wanted_output = 0; 14723 #ifdef INVARIANTS 14724 if (tp->t_inpcb == NULL) { 14725 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 14726 did_out, 14727 retval, tp, prev_state); 14728 } 14729 #endif 14730 #ifdef TCP_ACCOUNTING 14731 } else { 14732 /* 14733 * Track the time (see above). 14734 */ 14735 if (ack_val_set != 0xf) { 14736 uint64_t crtsc; 14737 14738 crtsc = get_cyclecount(); 14739 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14740 /* 14741 * Note we *DO NOT* increment the per-tcb counters since 14742 * in the else the TP may be gone!! 14743 */ 14744 } 14745 #endif 14746 } 14747 #ifdef TCP_ACCOUNTING 14748 sched_unpin(); 14749 #endif 14750 return (retval); 14751 } 14752 14753 void 14754 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14755 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14756 { 14757 struct timeval tv; 14758 14759 /* First lets see if we have old packets */ 14760 if (tp->t_in_pkt) { 14761 if (ctf_do_queued_segments(so, tp, 1)) { 14762 m_freem(m); 14763 return; 14764 } 14765 } 14766 if (m->m_flags & M_TSTMP_LRO) { 14767 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14768 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14769 } else { 14770 /* Should not be should we kassert instead? */ 14771 tcp_get_usecs(&tv); 14772 } 14773 if (rack_do_segment_nounlock(m, th, so, tp, 14774 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14775 INP_WUNLOCK(tp->t_inpcb); 14776 } 14777 } 14778 14779 struct rack_sendmap * 14780 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14781 { 14782 struct rack_sendmap *rsm = NULL; 14783 int32_t idx; 14784 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14785 14786 /* Return the next guy to be re-transmitted */ 14787 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14788 return (NULL); 14789 } 14790 if (tp->t_flags & TF_SENTFIN) { 14791 /* retran the end FIN? 
*/ 14792 return (NULL); 14793 } 14794 /* ok lets look at this one */ 14795 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14796 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14797 goto check_it; 14798 } 14799 rsm = rack_find_lowest_rsm(rack); 14800 if (rsm == NULL) { 14801 return (NULL); 14802 } 14803 check_it: 14804 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14805 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14806 /* 14807 * No sack so we automatically do the 3 strikes and 14808 * retransmit (no rack timer would be started). 14809 */ 14810 14811 return (rsm); 14812 } 14813 if (rsm->r_flags & RACK_ACKED) { 14814 return (NULL); 14815 } 14816 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14817 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14818 /* Its not yet ready */ 14819 return (NULL); 14820 } 14821 srtt = rack_grab_rtt(tp, rack); 14822 idx = rsm->r_rtr_cnt - 1; 14823 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14824 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14825 if ((tsused == ts_low) || 14826 (TSTMP_LT(tsused, ts_low))) { 14827 /* No time since sending */ 14828 return (NULL); 14829 } 14830 if ((tsused - ts_low) < thresh) { 14831 /* It has not been long enough yet */ 14832 return (NULL); 14833 } 14834 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14835 ((rsm->r_flags & RACK_SACK_PASSED) && 14836 (rack->sack_attack_disable == 0))) { 14837 /* 14838 * We have passed the dup-ack threshold <or> 14839 * a SACK has indicated this is missing. 14840 * Note that if you are a declared attacker 14841 * it is only the dup-ack threshold that 14842 * will cause retransmits. 14843 */ 14844 /* log retransmit reason */ 14845 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14846 rack->r_fast_output = 0; 14847 return (rsm); 14848 } 14849 return (NULL); 14850 } 14851 14852 static void 14853 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14854 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14855 int line, struct rack_sendmap *rsm, uint8_t quality) 14856 { 14857 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14858 union tcp_log_stackspecific log; 14859 struct timeval tv; 14860 14861 memset(&log, 0, sizeof(log)); 14862 log.u_bbr.flex1 = slot; 14863 log.u_bbr.flex2 = len; 14864 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14865 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14866 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14867 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14868 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14869 log.u_bbr.use_lt_bw <<= 1; 14870 log.u_bbr.use_lt_bw |= rack->r_late; 14871 log.u_bbr.use_lt_bw <<= 1; 14872 log.u_bbr.use_lt_bw |= rack->r_early; 14873 log.u_bbr.use_lt_bw <<= 1; 14874 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14875 log.u_bbr.use_lt_bw <<= 1; 14876 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14877 log.u_bbr.use_lt_bw <<= 1; 14878 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14879 log.u_bbr.use_lt_bw <<= 1; 14880 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14881 log.u_bbr.use_lt_bw <<= 1; 14882 log.u_bbr.use_lt_bw |= rack->gp_ready; 14883 log.u_bbr.pkt_epoch = line; 14884 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14885 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14886 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14887 log.u_bbr.bw_inuse = bw_est; 14888 log.u_bbr.delRate = bw; 14889 if (rack->r_ctl.gp_bw == 0) 14890 log.u_bbr.cur_del_rate = 0; 14891 else 14892 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14893 log.u_bbr.rttProp = len_time; 14894 
log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14895 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14896 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14897 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14898 /* We are in slow start */ 14899 log.u_bbr.flex7 = 1; 14900 } else { 14901 /* we are on congestion avoidance */ 14902 log.u_bbr.flex7 = 0; 14903 } 14904 log.u_bbr.flex8 = method; 14905 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14906 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14907 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14908 log.u_bbr.cwnd_gain <<= 1; 14909 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14910 log.u_bbr.cwnd_gain <<= 1; 14911 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14912 log.u_bbr.bbr_substate = quality; 14913 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14914 &rack->rc_inp->inp_socket->so_rcv, 14915 &rack->rc_inp->inp_socket->so_snd, 14916 BBR_LOG_HPTSI_CALC, 0, 14917 0, &log, false, &tv); 14918 } 14919 } 14920 14921 static uint32_t 14922 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14923 { 14924 uint32_t new_tso, user_max; 14925 14926 user_max = rack->rc_user_set_max_segs * mss; 14927 if (rack->rc_force_max_seg) { 14928 return (user_max); 14929 } 14930 if (rack->use_fixed_rate && 14931 ((rack->r_ctl.crte == NULL) || 14932 (bw != rack->r_ctl.crte->rate))) { 14933 /* Use the user mss since we are not exactly matched */ 14934 return (user_max); 14935 } 14936 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14937 if (new_tso > user_max) 14938 new_tso = user_max; 14939 return (new_tso); 14940 } 14941 14942 static int32_t 14943 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14944 { 14945 uint64_t lentim, fill_bw; 14946 14947 /* Lets first see if we are full, if so continue with normal rate */ 14948 rack->r_via_fill_cw = 0; 14949 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14950 return (slot); 14951 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14952 return (slot); 14953 if (rack->r_ctl.rc_last_us_rtt == 0) 14954 return (slot); 14955 if (rack->rc_pace_fill_if_rttin_range && 14956 (rack->r_ctl.rc_last_us_rtt >= 14957 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14958 /* The rtt is huge, N * smallest, lets not fill */ 14959 return (slot); 14960 } 14961 /* 14962 * first lets calculate the b/w based on the last us-rtt 14963 * and the sndwnd. 14964 */ 14965 fill_bw = rack->r_ctl.cwnd_to_use; 14966 /* Take the rwnd if its smaller */ 14967 if (fill_bw > rack->rc_tp->snd_wnd) 14968 fill_bw = rack->rc_tp->snd_wnd; 14969 if (rack->r_fill_less_agg) { 14970 /* 14971 * Now take away the inflight (this will reduce our 14972 * aggressiveness and yeah, if we get that much out in 1RTT 14973 * we will have had acks come back and still be behind). 
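		 *
		 * Purely illustrative numbers (not taken from anywhere in this
		 * code): with cwnd_to_use = 100000 bytes, snd_wnd = 150000
		 * bytes and 40000 bytes still in flight, this less-aggressive
		 * variant starts from 100000 - 40000 = 60000 bytes, which the
		 * lines below then turn into a rate of
		 * 60000 * HPTS_USEC_IN_SEC / rc_last_us_rtt bytes per second.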
14974 */ 14975 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14976 } 14977 /* Now lets make it into a b/w */ 14978 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14979 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14980 /* We are below the min b/w */ 14981 if (non_paced) 14982 *rate_wanted = fill_bw; 14983 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14984 return (slot); 14985 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14986 fill_bw = rack->r_ctl.bw_rate_cap; 14987 rack->r_via_fill_cw = 1; 14988 if (rack->r_rack_hw_rate_caps && 14989 (rack->r_ctl.crte != NULL)) { 14990 uint64_t high_rate; 14991 14992 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14993 if (fill_bw > high_rate) { 14994 /* We are capping bw at the highest rate table entry */ 14995 if (*rate_wanted > high_rate) { 14996 /* The original rate was also capped */ 14997 rack->r_via_fill_cw = 0; 14998 } 14999 rack_log_hdwr_pacing(rack, 15000 fill_bw, high_rate, __LINE__, 15001 0, 3); 15002 fill_bw = high_rate; 15003 if (capped) 15004 *capped = 1; 15005 } 15006 } else if ((rack->r_ctl.crte == NULL) && 15007 (rack->rack_hdrw_pacing == 0) && 15008 (rack->rack_hdw_pace_ena) && 15009 rack->r_rack_hw_rate_caps && 15010 (rack->rack_attempt_hdwr_pace == 0) && 15011 (rack->rc_inp->inp_route.ro_nh != NULL) && 15012 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 15013 /* 15014 * Ok we may have a first attempt that is greater than our top rate 15015 * lets check. 15016 */ 15017 uint64_t high_rate; 15018 15019 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 15020 if (high_rate) { 15021 if (fill_bw > high_rate) { 15022 fill_bw = high_rate; 15023 if (capped) 15024 *capped = 1; 15025 } 15026 } 15027 } 15028 /* 15029 * Ok fill_bw holds our mythical b/w to fill the cwnd 15030 * in a rtt, what does that time wise equate too? 15031 */ 15032 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 15033 lentim /= fill_bw; 15034 *rate_wanted = fill_bw; 15035 if (non_paced || (lentim < slot)) { 15036 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 15037 0, lentim, 12, __LINE__, NULL, 0); 15038 return ((int32_t)lentim); 15039 } else 15040 return (slot); 15041 } 15042 15043 static int32_t 15044 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 15045 { 15046 uint64_t srtt; 15047 int32_t slot = 0; 15048 int can_start_hw_pacing = 1; 15049 int err; 15050 15051 if (rack->rc_always_pace == 0) { 15052 /* 15053 * We use the most optimistic possible cwnd/srtt for 15054 * sending calculations. This will make our 15055 * calculation anticipate getting more through 15056 * quicker then possible. But thats ok we don't want 15057 * the peer to have a gap in data sending. 15058 */ 15059 uint64_t cwnd, tr_perms = 0; 15060 int32_t reduce = 0; 15061 15062 old_method: 15063 /* 15064 * We keep no precise pacing with the old method 15065 * instead we use the pacer to mitigate bursts. 
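		 *
		 * A rough sketch of the math below (illustrative numbers only):
		 * with cwnd = 64000 bytes and an srtt of 40000 usecs,
		 * tr_perms = (64000 * 1000) / 40000 = 1600 bytes per msec, so
		 * a 16000 byte send gets a slot of about 10 msec, before the
		 * rack_slot_reduction trim and the conversion to usecs.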
15066 */ 15067 if (rack->r_ctl.rc_rack_min_rtt) 15068 srtt = rack->r_ctl.rc_rack_min_rtt; 15069 else 15070 srtt = max(tp->t_srtt, 1); 15071 if (rack->r_ctl.rc_rack_largest_cwnd) 15072 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 15073 else 15074 cwnd = rack->r_ctl.cwnd_to_use; 15075 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 15076 tr_perms = (cwnd * 1000) / srtt; 15077 if (tr_perms == 0) { 15078 tr_perms = ctf_fixed_maxseg(tp); 15079 } 15080 /* 15081 * Calculate how long this will take to drain, if 15082 * the calculation comes out to zero, thats ok we 15083 * will use send_a_lot to possibly spin around for 15084 * more increasing tot_len_this_send to the point 15085 * that its going to require a pace, or we hit the 15086 * cwnd. Which in that case we are just waiting for 15087 * a ACK. 15088 */ 15089 slot = len / tr_perms; 15090 /* Now do we reduce the time so we don't run dry? */ 15091 if (slot && rack_slot_reduction) { 15092 reduce = (slot / rack_slot_reduction); 15093 if (reduce < slot) { 15094 slot -= reduce; 15095 } else 15096 slot = 0; 15097 } 15098 slot *= HPTS_USEC_IN_MSEC; 15099 if (rack->rc_pace_to_cwnd) { 15100 uint64_t rate_wanted = 0; 15101 15102 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 15103 rack->rc_ack_can_sendout_data = 1; 15104 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 15105 } else 15106 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 15107 } else { 15108 uint64_t bw_est, res, lentim, rate_wanted; 15109 uint32_t orig_val, segs, oh; 15110 int capped = 0; 15111 int prev_fill; 15112 15113 if ((rack->r_rr_config == 1) && rsm) { 15114 return (rack->r_ctl.rc_min_to); 15115 } 15116 if (rack->use_fixed_rate) { 15117 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 15118 } else if ((rack->r_ctl.init_rate == 0) && 15119 #ifdef NETFLIX_PEAKRATE 15120 (rack->rc_tp->t_maxpeakrate == 0) && 15121 #endif 15122 (rack->r_ctl.gp_bw == 0)) { 15123 /* no way to yet do an estimate */ 15124 bw_est = rate_wanted = 0; 15125 } else { 15126 bw_est = rack_get_bw(rack); 15127 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 15128 } 15129 if ((bw_est == 0) || (rate_wanted == 0) || 15130 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 15131 /* 15132 * No way yet to make a b/w estimate or 15133 * our raise is set incorrectly. 15134 */ 15135 goto old_method; 15136 } 15137 /* We need to account for all the overheads */ 15138 segs = (len + segsiz - 1) / segsiz; 15139 /* 15140 * We need the diff between 1514 bytes (e-mtu with e-hdr) 15141 * and how much data we put in each packet. Yes this 15142 * means we may be off if we are larger than 1500 bytes 15143 * or smaller. But this just makes us more conservative. 15144 */ 15145 if (rack_hw_rate_min && 15146 (bw_est < rack_hw_rate_min)) 15147 can_start_hw_pacing = 0; 15148 if (ETHERNET_SEGMENT_SIZE > segsiz) 15149 oh = ETHERNET_SEGMENT_SIZE - segsiz; 15150 else 15151 oh = 0; 15152 segs *= oh; 15153 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 15154 res = lentim / rate_wanted; 15155 slot = (uint32_t)res; 15156 orig_val = rack->r_ctl.rc_pace_max_segs; 15157 if (rack->r_ctl.crte == NULL) { 15158 /* 15159 * Only do this if we are not hardware pacing 15160 * since if we are doing hw-pacing below we will 15161 * set make a call after setting up or changing 15162 * the rate. 
15163 */ 15164 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 15165 } else if (rack->rc_inp->inp_snd_tag == NULL) { 15166 /* 15167 * We lost our rate somehow, this can happen 15168 * if the interface changed underneath us. 15169 */ 15170 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15171 rack->r_ctl.crte = NULL; 15172 /* Lets re-allow attempting to setup pacing */ 15173 rack->rack_hdrw_pacing = 0; 15174 rack->rack_attempt_hdwr_pace = 0; 15175 rack_log_hdwr_pacing(rack, 15176 rate_wanted, bw_est, __LINE__, 15177 0, 6); 15178 } 15179 /* Did we change the TSO size, if so log it */ 15180 if (rack->r_ctl.rc_pace_max_segs != orig_val) 15181 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 15182 prev_fill = rack->r_via_fill_cw; 15183 if ((rack->rc_pace_to_cwnd) && 15184 (capped == 0) && 15185 (rack->use_fixed_rate == 0) && 15186 (rack->in_probe_rtt == 0) && 15187 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 15188 /* 15189 * We want to pace at our rate *or* faster to 15190 * fill the cwnd to the max if its not full. 15191 */ 15192 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 15193 } 15194 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 15195 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 15196 if ((rack->rack_hdw_pace_ena) && 15197 (can_start_hw_pacing > 0) && 15198 (rack->rack_hdrw_pacing == 0) && 15199 (rack->rack_attempt_hdwr_pace == 0)) { 15200 /* 15201 * Lets attempt to turn on hardware pacing 15202 * if we can. 15203 */ 15204 rack->rack_attempt_hdwr_pace = 1; 15205 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 15206 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15207 rate_wanted, 15208 RS_PACING_GEQ, 15209 &err, &rack->r_ctl.crte_prev_rate); 15210 if (rack->r_ctl.crte) { 15211 rack->rack_hdrw_pacing = 1; 15212 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 15213 0, rack->r_ctl.crte, 15214 NULL); 15215 rack_log_hdwr_pacing(rack, 15216 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15217 err, 0); 15218 rack->r_ctl.last_hw_bw_req = rate_wanted; 15219 } else { 15220 counter_u64_add(rack_hw_pace_init_fail, 1); 15221 } 15222 } else if (rack->rack_hdrw_pacing && 15223 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 15224 /* Do we need to adjust our rate? */ 15225 const struct tcp_hwrate_limit_table *nrte; 15226 15227 if (rack->r_up_only && 15228 (rate_wanted < rack->r_ctl.crte->rate)) { 15229 /** 15230 * We have four possible states here 15231 * having to do with the previous time 15232 * and this time. 15233 * previous | this-time 15234 * A) 0 | 0 -- fill_cw not in the picture 15235 * B) 1 | 0 -- we were doing a fill-cw but now are not 15236 * C) 1 | 1 -- all rates from fill_cw 15237 * D) 0 | 1 -- we were doing non-fill and now we are filling 15238 * 15239 * For case A, C and D we don't allow a drop. But for 15240 * case B where we now our on our steady rate we do 15241 * allow a drop. 15242 * 15243 */ 15244 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 15245 goto done_w_hdwr; 15246 } 15247 if ((rate_wanted > rack->r_ctl.crte->rate) || 15248 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 15249 if (rack_hw_rate_to_low && 15250 (bw_est < rack_hw_rate_to_low)) { 15251 /* 15252 * The pacing rate is too low for hardware, but 15253 * do allow hardware pacing to be restarted. 
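					 *
					 * (The lines below release the crte and clear both
					 * rack_hdrw_pacing and rack_attempt_hdwr_pace, so a
					 * later pass through here is free to attempt hardware
					 * pacing again.)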
15254 */ 15255 rack_log_hdwr_pacing(rack, 15256 bw_est, rack->r_ctl.crte->rate, __LINE__, 15257 0, 5); 15258 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15259 rack->r_ctl.crte = NULL; 15260 rack->rack_attempt_hdwr_pace = 0; 15261 rack->rack_hdrw_pacing = 0; 15262 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15263 goto done_w_hdwr; 15264 } 15265 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15266 rack->rc_tp, 15267 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15268 rate_wanted, 15269 RS_PACING_GEQ, 15270 &err, &rack->r_ctl.crte_prev_rate); 15271 if (nrte == NULL) { 15272 /* Lost the rate */ 15273 rack->rack_hdrw_pacing = 0; 15274 rack->r_ctl.crte = NULL; 15275 rack_log_hdwr_pacing(rack, 15276 rate_wanted, 0, __LINE__, 15277 err, 1); 15278 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15279 counter_u64_add(rack_hw_pace_lost, 1); 15280 } else if (nrte != rack->r_ctl.crte) { 15281 rack->r_ctl.crte = nrte; 15282 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15283 segsiz, 0, 15284 rack->r_ctl.crte, 15285 NULL); 15286 rack_log_hdwr_pacing(rack, 15287 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15288 err, 2); 15289 rack->r_ctl.last_hw_bw_req = rate_wanted; 15290 } 15291 } else { 15292 /* We just need to adjust the segment size */ 15293 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15294 rack_log_hdwr_pacing(rack, 15295 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15296 0, 4); 15297 rack->r_ctl.last_hw_bw_req = rate_wanted; 15298 } 15299 } 15300 } 15301 if ((rack->r_ctl.crte != NULL) && 15302 (rack->r_ctl.crte->rate == rate_wanted)) { 15303 /* 15304 * We need to add a extra if the rates 15305 * are exactly matched. The idea is 15306 * we want the software to make sure the 15307 * queue is empty before adding more, this 15308 * gives us N MSS extra pace times where 15309 * N is our sysctl 15310 */ 15311 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15312 } 15313 done_w_hdwr: 15314 if (rack_limit_time_with_srtt && 15315 (rack->use_fixed_rate == 0) && 15316 #ifdef NETFLIX_PEAKRATE 15317 (rack->rc_tp->t_maxpeakrate == 0) && 15318 #endif 15319 (rack->rack_hdrw_pacing == 0)) { 15320 /* 15321 * Sanity check, we do not allow the pacing delay 15322 * to be longer than the SRTT of the path. If it is 15323 * a slow path, then adding a packet should increase 15324 * the RTT and compensate for this i.e. the srtt will 15325 * be greater so the allowed pacing time will be greater. 15326 * 15327 * Note this restriction is not for where a peak rate 15328 * is set, we are doing fixed pacing or hardware pacing. 15329 */ 15330 if (rack->rc_tp->t_srtt) 15331 srtt = rack->rc_tp->t_srtt; 15332 else 15333 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15334 if (srtt < (uint64_t)slot) { 15335 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15336 slot = srtt; 15337 } 15338 } 15339 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15340 } 15341 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15342 /* 15343 * If this rate is seeing enobufs when it 15344 * goes to send then either the nic is out 15345 * of gas or we are mis-estimating the time 15346 * somehow and not letting the queue empty 15347 * completely. Lets add to the pacing time. 
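		 *
		 * The boost applied below is simply time_between multiplied by
		 * the rack_enobuf_hw_boost_mult sysctl, clamped to the range
		 * [rack_enobuf_hw_min, rack_enobuf_hw_max]; e.g. (illustrative
		 * values only) a time_between of 100 usecs and a multiplier of
		 * 2 would add 200 usecs to the slot.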
15348 */ 15349 int hw_boost_delay; 15350 15351 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15352 if (hw_boost_delay > rack_enobuf_hw_max) 15353 hw_boost_delay = rack_enobuf_hw_max; 15354 else if (hw_boost_delay < rack_enobuf_hw_min) 15355 hw_boost_delay = rack_enobuf_hw_min; 15356 slot += hw_boost_delay; 15357 } 15358 if (slot) 15359 counter_u64_add(rack_calc_nonzero, 1); 15360 else 15361 counter_u64_add(rack_calc_zero, 1); 15362 return (slot); 15363 } 15364 15365 static void 15366 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15367 tcp_seq startseq, uint32_t sb_offset) 15368 { 15369 struct rack_sendmap *my_rsm = NULL; 15370 struct rack_sendmap fe; 15371 15372 if (tp->t_state < TCPS_ESTABLISHED) { 15373 /* 15374 * We don't start any measurements if we are 15375 * not at least established. 15376 */ 15377 return; 15378 } 15379 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15380 /* 15381 * We will get no more data into the SB 15382 * this means we need to have the data available 15383 * before we start a measurement. 15384 */ 15385 15386 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < 15387 max(rc_init_window(rack), 15388 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15389 /* Nope not enough data */ 15390 return; 15391 } 15392 } 15393 tp->t_flags |= TF_GPUTINPROG; 15394 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15395 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15396 tp->gput_seq = startseq; 15397 rack->app_limited_needs_set = 0; 15398 if (rack->in_probe_rtt) 15399 rack->measure_saw_probe_rtt = 1; 15400 else if ((rack->measure_saw_probe_rtt) && 15401 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15402 rack->measure_saw_probe_rtt = 0; 15403 if (rack->rc_gp_filled) 15404 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15405 else { 15406 /* Special case initial measurement */ 15407 struct timeval tv; 15408 15409 tp->gput_ts = tcp_get_usecs(&tv); 15410 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15411 } 15412 /* 15413 * We take a guess out into the future, 15414 * if we have no measurement and no 15415 * initial rate, we measure the first 15416 * initial-windows worth of data to 15417 * speed up getting some GP measurement and 15418 * thus start pacing. 15419 */ 15420 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15421 rack->app_limited_needs_set = 1; 15422 tp->gput_ack = startseq + max(rc_init_window(rack), 15423 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15424 rack_log_pacing_delay_calc(rack, 15425 tp->gput_seq, 15426 tp->gput_ack, 15427 0, 15428 tp->gput_ts, 15429 rack->r_ctl.rc_app_limited_cnt, 15430 9, 15431 __LINE__, NULL, 0); 15432 return; 15433 } 15434 if (sb_offset) { 15435 /* 15436 * We are out somewhere in the sb 15437 * can we use the already outstanding data? 15438 */ 15439 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15440 /* 15441 * Yes first one is good and in this case 15442 * the tp->gput_ts is correctly set based on 15443 * the last ack that arrived (no need to 15444 * set things up when an ack comes in). 15445 */ 15446 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15447 if ((my_rsm == NULL) || 15448 (my_rsm->r_rtr_cnt != 1)) { 15449 /* retransmission? */ 15450 goto use_latest; 15451 } 15452 } else { 15453 if (rack->r_ctl.rc_first_appl == NULL) { 15454 /* 15455 * If rc_first_appl is NULL 15456 * then the cnt should be 0. 15457 * This is probably an error, maybe 15458 * a KASSERT would be approprate. 
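				 *
				 * A sketch of the invariant such an assert would check
				 * (illustrative only, nothing is enabled here):
				 *   KASSERT((rack->r_ctl.rc_first_appl != NULL) ||
				 *       (rack->r_ctl.rc_app_limited_cnt == 0),
				 *       ("app_limited_cnt %u with no rc_first_appl",
				 *        rack->r_ctl.rc_app_limited_cnt));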
15459 */ 15460 goto use_latest; 15461 } 15462 /* 15463 * If we have a marker pointer to the last one that is 15464 * app limited we can use that, but we need to set 15465 * things up so that when it gets ack'ed we record 15466 * the ack time (if its not already acked). 15467 */ 15468 rack->app_limited_needs_set = 1; 15469 /* 15470 * We want to get to the rsm that is either 15471 * next with space i.e. over 1 MSS or the one 15472 * after that (after the app-limited). 15473 */ 15474 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15475 rack->r_ctl.rc_first_appl); 15476 if (my_rsm) { 15477 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15478 /* Have to use the next one */ 15479 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15480 my_rsm); 15481 else { 15482 /* Use after the first MSS of it is acked */ 15483 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15484 goto start_set; 15485 } 15486 } 15487 if ((my_rsm == NULL) || 15488 (my_rsm->r_rtr_cnt != 1)) { 15489 /* 15490 * Either its a retransmit or 15491 * the last is the app-limited one. 15492 */ 15493 goto use_latest; 15494 } 15495 } 15496 tp->gput_seq = my_rsm->r_start; 15497 start_set: 15498 if (my_rsm->r_flags & RACK_ACKED) { 15499 /* 15500 * This one has been acked use the arrival ack time 15501 */ 15502 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15503 rack->app_limited_needs_set = 0; 15504 } 15505 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15506 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15507 rack_log_pacing_delay_calc(rack, 15508 tp->gput_seq, 15509 tp->gput_ack, 15510 (uint64_t)my_rsm, 15511 tp->gput_ts, 15512 rack->r_ctl.rc_app_limited_cnt, 15513 9, 15514 __LINE__, NULL, 0); 15515 return; 15516 } 15517 15518 use_latest: 15519 /* 15520 * We don't know how long we may have been 15521 * idle or if this is the first-send. Lets 15522 * setup the flag so we will trim off 15523 * the first ack'd data so we get a true 15524 * measurement. 15525 */ 15526 rack->app_limited_needs_set = 1; 15527 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15528 /* Find this guy so we can pull the send time */ 15529 fe.r_start = startseq; 15530 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15531 if (my_rsm) { 15532 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15533 if (my_rsm->r_flags & RACK_ACKED) { 15534 /* 15535 * Unlikely since its probably what was 15536 * just transmitted (but I am paranoid). 15537 */ 15538 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15539 rack->app_limited_needs_set = 0; 15540 } 15541 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15542 /* This also is unlikely */ 15543 tp->gput_seq = my_rsm->r_start; 15544 } 15545 } else { 15546 /* 15547 * TSNH unless we have some send-map limit, 15548 * and even at that it should not be hitting 15549 * that limit (we should have stopped sending). 
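		 * (TSNH is "this should not happen".)  If we do land here the
		 * fallback below simply stamps rc_gp_output_ts from the
		 * current microuptime so the measurement start time is at
		 * least sane.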
15550 */ 15551 struct timeval tv; 15552 15553 microuptime(&tv); 15554 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15555 } 15556 rack_log_pacing_delay_calc(rack, 15557 tp->gput_seq, 15558 tp->gput_ack, 15559 (uint64_t)my_rsm, 15560 tp->gput_ts, 15561 rack->r_ctl.rc_app_limited_cnt, 15562 9, __LINE__, NULL, 0); 15563 } 15564 15565 static inline uint32_t 15566 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15567 uint32_t avail, int32_t sb_offset) 15568 { 15569 uint32_t len; 15570 uint32_t sendwin; 15571 15572 if (tp->snd_wnd > cwnd_to_use) 15573 sendwin = cwnd_to_use; 15574 else 15575 sendwin = tp->snd_wnd; 15576 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15577 /* We never want to go over our peers rcv-window */ 15578 len = 0; 15579 } else { 15580 uint32_t flight; 15581 15582 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15583 if (flight >= sendwin) { 15584 /* 15585 * We have in flight what we are allowed by cwnd (if 15586 * it was rwnd blocking it would have hit above out 15587 * >= tp->snd_wnd). 15588 */ 15589 return (0); 15590 } 15591 len = sendwin - flight; 15592 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15593 /* We would send too much (beyond the rwnd) */ 15594 len = tp->snd_wnd - ctf_outstanding(tp); 15595 } 15596 if ((len + sb_offset) > avail) { 15597 /* 15598 * We don't have that much in the SB, how much is 15599 * there? 15600 */ 15601 len = avail - sb_offset; 15602 } 15603 } 15604 return (len); 15605 } 15606 15607 static void 15608 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15609 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15610 int rsm_is_null, int optlen, int line, uint16_t mode) 15611 { 15612 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15613 union tcp_log_stackspecific log; 15614 struct timeval tv; 15615 15616 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15617 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15618 log.u_bbr.flex1 = error; 15619 log.u_bbr.flex2 = flags; 15620 log.u_bbr.flex3 = rsm_is_null; 15621 log.u_bbr.flex4 = ipoptlen; 15622 log.u_bbr.flex5 = tp->rcv_numsacks; 15623 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15624 log.u_bbr.flex7 = optlen; 15625 log.u_bbr.flex8 = rack->r_fsb_inited; 15626 log.u_bbr.applimited = rack->r_fast_output; 15627 log.u_bbr.bw_inuse = rack_get_bw(rack); 15628 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15629 log.u_bbr.cwnd_gain = mode; 15630 log.u_bbr.pkts_out = orig_len; 15631 log.u_bbr.lt_epoch = len; 15632 log.u_bbr.delivered = line; 15633 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15634 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15635 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15636 len, &log, false, NULL, NULL, 0, &tv); 15637 } 15638 } 15639 15640 15641 static struct mbuf * 15642 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15643 struct rack_fast_send_blk *fsb, 15644 int32_t seglimit, int32_t segsize, int hw_tls) 15645 { 15646 #ifdef KERN_TLS 15647 struct ktls_session *tls, *ntls; 15648 #ifdef INVARIANTS 15649 struct mbuf *start; 15650 #endif 15651 #endif 15652 struct mbuf *m, *n, **np, *smb; 15653 struct mbuf *top; 15654 int32_t off, soff; 15655 int32_t len = *plen; 15656 int32_t fragsize; 15657 int32_t len_cp = 0; 15658 uint32_t mlen, frags; 15659 15660 soff = off = the_off; 15661 smb = m = the_m; 15662 np = ⊤ 15663 top = NULL; 15664 #ifdef KERN_TLS 15665 if (hw_tls && (m->m_flags & M_EXTPG)) 15666 tls = m->m_epg_tls; 
15667 else 15668 tls = NULL; 15669 #ifdef INVARIANTS 15670 start = m; 15671 #endif 15672 #endif 15673 while (len > 0) { 15674 if (m == NULL) { 15675 *plen = len_cp; 15676 break; 15677 } 15678 #ifdef KERN_TLS 15679 if (hw_tls) { 15680 if (m->m_flags & M_EXTPG) 15681 ntls = m->m_epg_tls; 15682 else 15683 ntls = NULL; 15684 15685 /* 15686 * Avoid mixing TLS records with handshake 15687 * data or TLS records from different 15688 * sessions. 15689 */ 15690 if (tls != ntls) { 15691 MPASS(m != start); 15692 *plen = len_cp; 15693 break; 15694 } 15695 } 15696 #endif 15697 mlen = min(len, m->m_len - off); 15698 if (seglimit) { 15699 /* 15700 * For M_EXTPG mbufs, add 3 segments 15701 * + 1 in case we are crossing page boundaries 15702 * + 2 in case the TLS hdr/trailer are used 15703 * It is cheaper to just add the segments 15704 * than it is to take the cache miss to look 15705 * at the mbuf ext_pgs state in detail. 15706 */ 15707 if (m->m_flags & M_EXTPG) { 15708 fragsize = min(segsize, PAGE_SIZE); 15709 frags = 3; 15710 } else { 15711 fragsize = segsize; 15712 frags = 0; 15713 } 15714 15715 /* Break if we really can't fit anymore. */ 15716 if ((frags + 1) >= seglimit) { 15717 *plen = len_cp; 15718 break; 15719 } 15720 15721 /* 15722 * Reduce size if you can't copy the whole 15723 * mbuf. If we can't copy the whole mbuf, also 15724 * adjust len so the loop will end after this 15725 * mbuf. 15726 */ 15727 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15728 mlen = (seglimit - frags - 1) * fragsize; 15729 len = mlen; 15730 *plen = len_cp + len; 15731 } 15732 frags += howmany(mlen, fragsize); 15733 if (frags == 0) 15734 frags++; 15735 seglimit -= frags; 15736 KASSERT(seglimit > 0, 15737 ("%s: seglimit went too low", __func__)); 15738 } 15739 n = m_get(M_NOWAIT, m->m_type); 15740 *np = n; 15741 if (n == NULL) 15742 goto nospace; 15743 n->m_len = mlen; 15744 soff += mlen; 15745 len_cp += n->m_len; 15746 if (m->m_flags & (M_EXT|M_EXTPG)) { 15747 n->m_data = m->m_data + off; 15748 mb_dupcl(n, m); 15749 } else { 15750 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15751 (u_int)n->m_len); 15752 } 15753 len -= n->m_len; 15754 off = 0; 15755 m = m->m_next; 15756 np = &n->m_next; 15757 if (len || (soff == smb->m_len)) { 15758 /* 15759 * We have more so we move forward or 15760 * we have consumed the entire mbuf and 15761 * len has fell to 0. 15762 */ 15763 soff = 0; 15764 smb = m; 15765 } 15766 15767 } 15768 if (fsb != NULL) { 15769 fsb->m = smb; 15770 fsb->off = soff; 15771 if (smb) { 15772 /* 15773 * Save off the size of the mbuf. We do 15774 * this so that we can recognize when it 15775 * has been trimmed by sbcut() as acks 15776 * come in. 15777 */ 15778 fsb->o_m_len = smb->m_len; 15779 } else { 15780 /* 15781 * This is the case where the next mbuf went to NULL. This 15782 * means with this copy we have sent everything in the sb. 15783 * In theory we could clear the fast_output flag, but lets 15784 * not since its possible that we could get more added 15785 * and acks that call the extend function which would let 15786 * us send more. 15787 */ 15788 fsb->o_m_len = 0; 15789 } 15790 } 15791 return (top); 15792 nospace: 15793 if (top) 15794 m_freem(top); 15795 return (NULL); 15796 15797 } 15798 15799 /* 15800 * This is a copy of m_copym(), taking the TSO segment size/limit 15801 * constraints into account, and advancing the sndptr as it goes. 
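 *
 * The starting point comes from the fast send block (fsb.m and fsb.off);
 * as noted in the code below, the saved offset is first re-synced against
 * fsb.o_m_len in case acks have trimmed, or an m_compress has grown, the
 * head mbuf since those values were recorded.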
 */
static struct mbuf *
rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
    int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
{
	struct mbuf *m, *n;
	int32_t soff;

	soff = rack->r_ctl.fsb.off;
	m = rack->r_ctl.fsb.m;
	if (rack->r_ctl.fsb.o_m_len > m->m_len) {
		/*
		 * The mbuf had the front of it chopped off by an ack,
		 * we need to adjust the soff/off by that difference.
		 */
		uint32_t delta;

		delta = rack->r_ctl.fsb.o_m_len - m->m_len;
		soff -= delta;
	} else if (rack->r_ctl.fsb.o_m_len < m->m_len) {
		/*
		 * The mbuf was expanded, probably by
		 * an m_compress. Just update o_m_len.
		 */
		rack->r_ctl.fsb.o_m_len = m->m_len;
	}
	KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
	KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
	KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
		__FUNCTION__,
		rack, *plen, m, m->m_len));
	/* Save off the right location before we copy and advance */
	*s_soff = soff;
	*s_mb = rack->r_ctl.fsb.m;
	n = rack_fo_base_copym(m, soff, plen,
			       &rack->r_ctl.fsb,
			       seglimit, segsize, rack->r_ctl.fsb.hw_tls);
	return (n);
}

static int
rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
		     uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
{
	/*
	 * Enter the fast retransmit path. We are given that a sched_pin is
	 * in place (if accounting is compiled in) and the cycle count taken
	 * at the entry is in ts_val. The concept here is that the rsm
	 * now holds the mbuf offsets and such so we can directly transmit
	 * without a lot of overhead, the len field is already set for
	 * us to prohibit us from sending too much (usually it's 1MSS).
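	 *
	 * In rough terms the path below builds the TCP/IP header from the
	 * pre-formed template in r_ctl.fsb, clones the payload mbufs by
	 * reference via rack_fo_base_copym(), and hands the result straight
	 * to ip_output()/ip6_output(); any missing piece drops us to the
	 * failed label, which frees the header mbuf (if any) and returns -1.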
15853 */ 15854 struct ip *ip = NULL; 15855 struct udphdr *udp = NULL; 15856 struct tcphdr *th = NULL; 15857 struct mbuf *m = NULL; 15858 struct inpcb *inp; 15859 uint8_t *cpto; 15860 struct tcp_log_buffer *lgb; 15861 #ifdef TCP_ACCOUNTING 15862 uint64_t crtsc; 15863 int cnt_thru = 1; 15864 #endif 15865 struct tcpopt to; 15866 u_char opt[TCP_MAXOLEN]; 15867 uint32_t hdrlen, optlen; 15868 int32_t slot, segsiz, max_val, tso = 0, error, ulen = 0; 15869 uint16_t flags; 15870 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15871 uint32_t if_hw_tsomaxsegsize; 15872 15873 #ifdef INET6 15874 struct ip6_hdr *ip6 = NULL; 15875 15876 if (rack->r_is_v6) { 15877 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15878 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15879 } else 15880 #endif /* INET6 */ 15881 { 15882 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15883 hdrlen = sizeof(struct tcpiphdr); 15884 } 15885 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15886 goto failed; 15887 } 15888 if (doing_tlp) { 15889 /* Its a TLP add the flag, it may already be there but be sure */ 15890 rsm->r_flags |= RACK_TLP; 15891 } else { 15892 /* If it was a TLP it is not not on this retransmit */ 15893 rsm->r_flags &= ~RACK_TLP; 15894 } 15895 startseq = rsm->r_start; 15896 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15897 inp = rack->rc_inp; 15898 to.to_flags = 0; 15899 flags = tcp_outflags[tp->t_state]; 15900 if (flags & (TH_SYN|TH_RST)) { 15901 goto failed; 15902 } 15903 if (rsm->r_flags & RACK_HAS_FIN) { 15904 /* We can't send a FIN here */ 15905 goto failed; 15906 } 15907 if (flags & TH_FIN) { 15908 /* We never send a FIN */ 15909 flags &= ~TH_FIN; 15910 } 15911 if (tp->t_flags & TF_RCVD_TSTMP) { 15912 to.to_tsval = ms_cts + tp->ts_offset; 15913 to.to_tsecr = tp->ts_recent; 15914 to.to_flags = TOF_TS; 15915 } 15916 optlen = tcp_addoptions(&to, opt); 15917 hdrlen += optlen; 15918 udp = rack->r_ctl.fsb.udp; 15919 if (udp) 15920 hdrlen += sizeof(struct udphdr); 15921 if (rack->r_ctl.rc_pace_max_segs) 15922 max_val = rack->r_ctl.rc_pace_max_segs; 15923 else if (rack->rc_user_set_max_segs) 15924 max_val = rack->rc_user_set_max_segs * segsiz; 15925 else 15926 max_val = len; 15927 if ((tp->t_flags & TF_TSO) && 15928 V_tcp_do_tso && 15929 (len > segsiz) && 15930 (tp->t_port == 0)) 15931 tso = 1; 15932 #ifdef INET6 15933 if (MHLEN < hdrlen + max_linkhdr) 15934 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15935 else 15936 #endif 15937 m = m_gethdr(M_NOWAIT, MT_DATA); 15938 if (m == NULL) 15939 goto failed; 15940 m->m_data += max_linkhdr; 15941 m->m_len = hdrlen; 15942 th = rack->r_ctl.fsb.th; 15943 /* Establish the len to send */ 15944 if (len > max_val) 15945 len = max_val; 15946 if ((tso) && (len + optlen > tp->t_maxseg)) { 15947 uint32_t if_hw_tsomax; 15948 int32_t max_len; 15949 15950 /* extract TSO information */ 15951 if_hw_tsomax = tp->t_tsomax; 15952 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15953 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15954 /* 15955 * Check if we should limit by maximum payload 15956 * length: 15957 */ 15958 if (if_hw_tsomax != 0) { 15959 /* compute maximum TSO length */ 15960 max_len = (if_hw_tsomax - hdrlen - 15961 max_linkhdr); 15962 if (max_len <= 0) { 15963 goto failed; 15964 } else if (len > max_len) { 15965 len = max_len; 15966 } 15967 } 15968 if (len <= segsiz) { 15969 /* 15970 * In case there are too many small fragments don't 15971 * use TSO: 15972 */ 15973 tso = 0; 15974 } 15975 } else { 15976 tso = 0; 15977 } 15978 if ((tso == 0) && (len > 
segsiz)) 15979 len = segsiz; 15980 if ((len == 0) || 15981 (len <= MHLEN - hdrlen - max_linkhdr)) { 15982 goto failed; 15983 } 15984 th->th_seq = htonl(rsm->r_start); 15985 th->th_ack = htonl(tp->rcv_nxt); 15986 /* 15987 * The PUSH bit should only be applied 15988 * if the full retransmission is made. If 15989 * we are sending less than this is the 15990 * left hand edge and should not have 15991 * the PUSH bit. 15992 */ 15993 if ((rsm->r_flags & RACK_HAD_PUSH) && 15994 (len == (rsm->r_end - rsm->r_start))) 15995 flags |= TH_PUSH; 15996 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15997 if (th->th_win == 0) { 15998 tp->t_sndzerowin++; 15999 tp->t_flags |= TF_RXWIN0SENT; 16000 } else 16001 tp->t_flags &= ~TF_RXWIN0SENT; 16002 if (rsm->r_flags & RACK_TLP) { 16003 /* 16004 * TLP should not count in retran count, but 16005 * in its own bin 16006 */ 16007 counter_u64_add(rack_tlp_retran, 1); 16008 counter_u64_add(rack_tlp_retran_bytes, len); 16009 } else { 16010 tp->t_sndrexmitpack++; 16011 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 16012 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 16013 } 16014 #ifdef STATS 16015 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 16016 len); 16017 #endif 16018 if (rsm->m == NULL) 16019 goto failed; 16020 if (rsm->orig_m_len != rsm->m->m_len) { 16021 /* Fix up the orig_m_len and possibly the mbuf offset */ 16022 rack_adjust_orig_mlen(rsm); 16023 } 16024 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 16025 if (len <= segsiz) { 16026 /* 16027 * Must have ran out of mbufs for the copy 16028 * shorten it to no longer need tso. Lets 16029 * not put on sendalot since we are low on 16030 * mbufs. 16031 */ 16032 tso = 0; 16033 } 16034 if ((m->m_next == NULL) || (len <= 0)){ 16035 goto failed; 16036 } 16037 if (udp) { 16038 if (rack->r_is_v6) 16039 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16040 else 16041 ulen = hdrlen + len - sizeof(struct ip); 16042 udp->uh_ulen = htons(ulen); 16043 } 16044 m->m_pkthdr.rcvif = (struct ifnet *)0; 16045 if (TCPS_HAVERCVDSYN(tp->t_state) && 16046 (tp->t_flags2 & TF2_ECN_PERMIT)) { 16047 int ect = tcp_ecn_output_established(tp, &flags, len); 16048 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16049 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16050 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16051 #ifdef INET6 16052 if (rack->r_is_v6) { 16053 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16054 ip6->ip6_flow |= htonl(ect << 20); 16055 } 16056 else 16057 #endif 16058 { 16059 ip->ip_tos &= ~IPTOS_ECN_MASK; 16060 ip->ip_tos |= ect; 16061 } 16062 } 16063 tcp_set_flags(th, flags); 16064 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16065 #ifdef INET6 16066 if (rack->r_is_v6) { 16067 if (tp->t_port) { 16068 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16069 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16070 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16071 th->th_sum = htons(0); 16072 UDPSTAT_INC(udps_opackets); 16073 } else { 16074 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16075 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16076 th->th_sum = in6_cksum_pseudo(ip6, 16077 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16078 0); 16079 } 16080 } 16081 #endif 16082 #if defined(INET6) && defined(INET) 16083 else 16084 #endif 16085 #ifdef INET 16086 { 16087 if (tp->t_port) { 16088 m->m_pkthdr.csum_flags = CSUM_UDP; 16089 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16090 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 
16091 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16092 th->th_sum = htons(0); 16093 UDPSTAT_INC(udps_opackets); 16094 } else { 16095 m->m_pkthdr.csum_flags = CSUM_TCP; 16096 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16097 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16098 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16099 IPPROTO_TCP + len + optlen)); 16100 } 16101 /* IP version must be set here for ipv4/ipv6 checking later */ 16102 KASSERT(ip->ip_v == IPVERSION, 16103 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16104 } 16105 #endif 16106 if (tso) { 16107 KASSERT(len > tp->t_maxseg - optlen, 16108 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16109 m->m_pkthdr.csum_flags |= CSUM_TSO; 16110 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16111 } 16112 #ifdef INET6 16113 if (rack->r_is_v6) { 16114 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16115 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16116 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16117 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16118 else 16119 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16120 } 16121 #endif 16122 #if defined(INET) && defined(INET6) 16123 else 16124 #endif 16125 #ifdef INET 16126 { 16127 ip->ip_len = htons(m->m_pkthdr.len); 16128 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16129 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16130 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16131 if (tp->t_port == 0 || len < V_tcp_minmss) { 16132 ip->ip_off |= htons(IP_DF); 16133 } 16134 } else { 16135 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16136 } 16137 } 16138 #endif 16139 /* Time to copy in our header */ 16140 cpto = mtod(m, uint8_t *); 16141 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16142 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16143 if (optlen) { 16144 bcopy(opt, th + 1, optlen); 16145 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16146 } else { 16147 th->th_off = sizeof(struct tcphdr) >> 2; 16148 } 16149 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16150 union tcp_log_stackspecific log; 16151 16152 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16153 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16154 if (rack->rack_no_prr) 16155 log.u_bbr.flex1 = 0; 16156 else 16157 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16158 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16159 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16160 log.u_bbr.flex4 = max_val; 16161 log.u_bbr.flex5 = 0; 16162 /* Save off the early/late values */ 16163 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16164 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16165 log.u_bbr.bw_inuse = rack_get_bw(rack); 16166 if (doing_tlp == 0) 16167 log.u_bbr.flex8 = 1; 16168 else 16169 log.u_bbr.flex8 = 2; 16170 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16171 log.u_bbr.flex7 = 55; 16172 log.u_bbr.pkts_out = tp->t_maxseg; 16173 log.u_bbr.timeStamp = cts; 16174 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16175 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16176 log.u_bbr.delivered = 0; 16177 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16178 len, &log, false, NULL, NULL, 0, tv); 16179 } else 16180 lgb = NULL; 16181 #ifdef INET6 16182 if (rack->r_is_v6) { 16183 error = ip6_output(m, NULL, 16184 &inp->inp_route6, 16185 0, NULL, NULL, inp); 16186 } 16187 #endif 16188 #if defined(INET) && defined(INET6) 16189 else 16190 #endif 16191 #ifdef INET 16192 { 16193 error = ip_output(m, NULL, 16194 &inp->inp_route, 
16195 0, 0, inp); 16196 } 16197 #endif 16198 m = NULL; 16199 if (lgb) { 16200 lgb->tlb_errno = error; 16201 lgb = NULL; 16202 } 16203 if (error) { 16204 goto failed; 16205 } 16206 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 16207 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 16208 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 16209 rack->rc_tlp_in_progress = 1; 16210 rack->r_ctl.rc_tlp_cnt_out++; 16211 } 16212 if (error == 0) { 16213 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 16214 if (doing_tlp) { 16215 rack->rc_last_sent_tlp_past_cumack = 0; 16216 rack->rc_last_sent_tlp_seq_valid = 1; 16217 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 16218 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 16219 } 16220 } 16221 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16222 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16223 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 16224 rack->r_ctl.retran_during_recovery += len; 16225 { 16226 int idx; 16227 16228 idx = (len / segsiz) + 3; 16229 if (idx >= TCP_MSS_ACCT_ATIMER) 16230 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16231 else 16232 counter_u64_add(rack_out_size[idx], 1); 16233 } 16234 if (tp->t_rtttime == 0) { 16235 tp->t_rtttime = ticks; 16236 tp->t_rtseq = startseq; 16237 KMOD_TCPSTAT_INC(tcps_segstimed); 16238 } 16239 counter_u64_add(rack_fto_rsm_send, 1); 16240 if (error && (error == ENOBUFS)) { 16241 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 16242 if (rack->rc_enobuf < 0x7f) 16243 rack->rc_enobuf++; 16244 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16245 slot = 10 * HPTS_USEC_IN_MSEC; 16246 } else 16247 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16248 if ((slot == 0) || 16249 (rack->rc_always_pace == 0) || 16250 (rack->r_rr_config == 1)) { 16251 /* 16252 * We have no pacing set or we 16253 * are using old-style rack or 16254 * we are overriden to use the old 1ms pacing. 16255 */ 16256 slot = rack->r_ctl.rc_min_to; 16257 } 16258 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16259 if (rack->r_must_retran) { 16260 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 16261 if ((SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) || 16262 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 16263 /* 16264 * We have retransmitted all we need. If 16265 * RACK_MUST_RXT is not set then we need to 16266 * not retransmit this guy. 
			 */
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
			if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
				/* Not one we should rxt */
				goto failed;
			} else {
				/* Clear the flag */
				rsm->r_flags &= ~RACK_MUST_RXT;
			}
		} else {
			/* Remove the flag */
			rsm->r_flags &= ~RACK_MUST_RXT;
		}
	}
#ifdef TCP_ACCOUNTING
	crtsc = get_cyclecount();
	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
		tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru;
	}
	counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru);
	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
		tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
	}
	counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val));
	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
		tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz);
	}
	counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz));
	sched_unpin();
#endif
	return (0);
failed:
	if (m)
		m_free(m);
	return (-1);
}

static void
rack_sndbuf_autoscale(struct tcp_rack *rack)
{
	/*
	 * Automatic sizing of send socket buffer. Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product). Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (e.g. trans-continental/oceanic links). Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 * 1. receive window of remote host is larger than send buffer
	 *    (with a fudge factor of 5/4th);
	 * 2. send buffer is filled to 7/8th with data (so we actually
	 *    have data to make use of it);
	 * 3. send buffer fill has not hit maximal automatic size;
	 * 4. our send window (slow start and congestion controlled) is
	 *    larger than sent but unacknowledged data in send buffer.
	 *
	 * Note that the rack version moves things much faster since
	 * we want to avoid hitting cache lines in the rack_fast_output()
	 * path so this is called much less often and thus moves
	 * the SB forward by a percentage.
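	 *
	 * A rough illustration of one step (numbers purely illustrative):
	 * with rack_autosndbuf_inc = 20 and sb_hiwat = 65536 bytes the
	 * increment is max(20% of 65536, V_tcp_autosndbuf_inc) = 13107
	 * bytes, giving a new target of about 78643 bytes, which is then
	 * capped at V_tcp_autosndbuf_max before sbreserve_locked() is
	 * called below.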
16330 */ 16331 struct socket *so; 16332 struct tcpcb *tp; 16333 uint32_t sendwin, scaleup; 16334 16335 tp = rack->rc_tp; 16336 so = rack->rc_inp->inp_socket; 16337 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 16338 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 16339 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 16340 sbused(&so->so_snd) >= 16341 (so->so_snd.sb_hiwat / 8 * 7) && 16342 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 16343 sendwin >= (sbused(&so->so_snd) - 16344 (tp->snd_nxt - tp->snd_una))) { 16345 if (rack_autosndbuf_inc) 16346 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 16347 else 16348 scaleup = V_tcp_autosndbuf_inc; 16349 if (scaleup < V_tcp_autosndbuf_inc) 16350 scaleup = V_tcp_autosndbuf_inc; 16351 scaleup += so->so_snd.sb_hiwat; 16352 if (scaleup > V_tcp_autosndbuf_max) 16353 scaleup = V_tcp_autosndbuf_max; 16354 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread)) 16355 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16356 } 16357 } 16358 } 16359 16360 static int 16361 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16362 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16363 { 16364 /* 16365 * Enter to do fast output. We are given that the sched_pin is 16366 * in place (if accounting is compiled in) and the cycle count taken 16367 * at entry is in place in ts_val. The idea here is that 16368 * we know how many more bytes needs to be sent (presumably either 16369 * during pacing or to fill the cwnd and that was greater than 16370 * the max-burst). We have how much to send and all the info we 16371 * need to just send. 16372 */ 16373 struct ip *ip = NULL; 16374 struct udphdr *udp = NULL; 16375 struct tcphdr *th = NULL; 16376 struct mbuf *m, *s_mb; 16377 struct inpcb *inp; 16378 uint8_t *cpto; 16379 struct tcp_log_buffer *lgb; 16380 #ifdef TCP_ACCOUNTING 16381 uint64_t crtsc; 16382 #endif 16383 struct tcpopt to; 16384 u_char opt[TCP_MAXOLEN]; 16385 uint32_t hdrlen, optlen; 16386 int cnt_thru = 1; 16387 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16388 uint16_t flags; 16389 uint32_t s_soff; 16390 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16391 uint32_t if_hw_tsomaxsegsize; 16392 uint16_t add_flag = RACK_SENT_FP; 16393 #ifdef INET6 16394 struct ip6_hdr *ip6 = NULL; 16395 16396 if (rack->r_is_v6) { 16397 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16398 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16399 } else 16400 #endif /* INET6 */ 16401 { 16402 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16403 hdrlen = sizeof(struct tcpiphdr); 16404 } 16405 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16406 m = NULL; 16407 goto failed; 16408 } 16409 startseq = tp->snd_max; 16410 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16411 inp = rack->rc_inp; 16412 len = rack->r_ctl.fsb.left_to_send; 16413 to.to_flags = 0; 16414 flags = rack->r_ctl.fsb.tcp_flags; 16415 if (tp->t_flags & TF_RCVD_TSTMP) { 16416 to.to_tsval = ms_cts + tp->ts_offset; 16417 to.to_tsecr = tp->ts_recent; 16418 to.to_flags = TOF_TS; 16419 } 16420 optlen = tcp_addoptions(&to, opt); 16421 hdrlen += optlen; 16422 udp = rack->r_ctl.fsb.udp; 16423 if (udp) 16424 hdrlen += sizeof(struct udphdr); 16425 if (rack->r_ctl.rc_pace_max_segs) 16426 max_val = rack->r_ctl.rc_pace_max_segs; 16427 else if (rack->rc_user_set_max_segs) 16428 max_val = rack->rc_user_set_max_segs * segsiz; 16429 else 16430 max_val = len; 16431 if ((tp->t_flags & TF_TSO) && 
16432 V_tcp_do_tso && 16433 (len > segsiz) && 16434 (tp->t_port == 0)) 16435 tso = 1; 16436 again: 16437 #ifdef INET6 16438 if (MHLEN < hdrlen + max_linkhdr) 16439 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16440 else 16441 #endif 16442 m = m_gethdr(M_NOWAIT, MT_DATA); 16443 if (m == NULL) 16444 goto failed; 16445 m->m_data += max_linkhdr; 16446 m->m_len = hdrlen; 16447 th = rack->r_ctl.fsb.th; 16448 /* Establish the len to send */ 16449 if (len > max_val) 16450 len = max_val; 16451 if ((tso) && (len + optlen > tp->t_maxseg)) { 16452 uint32_t if_hw_tsomax; 16453 int32_t max_len; 16454 16455 /* extract TSO information */ 16456 if_hw_tsomax = tp->t_tsomax; 16457 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16458 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16459 /* 16460 * Check if we should limit by maximum payload 16461 * length: 16462 */ 16463 if (if_hw_tsomax != 0) { 16464 /* compute maximum TSO length */ 16465 max_len = (if_hw_tsomax - hdrlen - 16466 max_linkhdr); 16467 if (max_len <= 0) { 16468 goto failed; 16469 } else if (len > max_len) { 16470 len = max_len; 16471 } 16472 } 16473 if (len <= segsiz) { 16474 /* 16475 * In case there are too many small fragments don't 16476 * use TSO: 16477 */ 16478 tso = 0; 16479 } 16480 } else { 16481 tso = 0; 16482 } 16483 if ((tso == 0) && (len > segsiz)) 16484 len = segsiz; 16485 if ((len == 0) || 16486 (len <= MHLEN - hdrlen - max_linkhdr)) { 16487 goto failed; 16488 } 16489 sb_offset = tp->snd_max - tp->snd_una; 16490 th->th_seq = htonl(tp->snd_max); 16491 th->th_ack = htonl(tp->rcv_nxt); 16492 tcp_set_flags(th, flags); 16493 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16494 if (th->th_win == 0) { 16495 tp->t_sndzerowin++; 16496 tp->t_flags |= TF_RXWIN0SENT; 16497 } else 16498 tp->t_flags &= ~TF_RXWIN0SENT; 16499 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16500 KMOD_TCPSTAT_INC(tcps_sndpack); 16501 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16502 #ifdef STATS 16503 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16504 len); 16505 #endif 16506 if (rack->r_ctl.fsb.m == NULL) 16507 goto failed; 16508 16509 /* s_mb and s_soff are saved for rack_log_output */ 16510 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16511 &s_mb, &s_soff); 16512 if (len <= segsiz) { 16513 /* 16514 * Must have ran out of mbufs for the copy 16515 * shorten it to no longer need tso. Lets 16516 * not put on sendalot since we are low on 16517 * mbufs. 
16518 */ 16519 tso = 0; 16520 } 16521 if (rack->r_ctl.fsb.rfo_apply_push && 16522 (len == rack->r_ctl.fsb.left_to_send)) { 16523 tcp_set_flags(th, flags | TH_PUSH); 16524 add_flag |= RACK_HAD_PUSH; 16525 } 16526 if ((m->m_next == NULL) || (len <= 0)){ 16527 goto failed; 16528 } 16529 if (udp) { 16530 if (rack->r_is_v6) 16531 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16532 else 16533 ulen = hdrlen + len - sizeof(struct ip); 16534 udp->uh_ulen = htons(ulen); 16535 } 16536 m->m_pkthdr.rcvif = (struct ifnet *)0; 16537 if (TCPS_HAVERCVDSYN(tp->t_state) && 16538 (tp->t_flags2 & TF2_ECN_PERMIT)) { 16539 int ect = tcp_ecn_output_established(tp, &flags, len); 16540 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16541 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16542 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16543 #ifdef INET6 16544 if (rack->r_is_v6) { 16545 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16546 ip6->ip6_flow |= htonl(ect << 20); 16547 } 16548 else 16549 #endif 16550 { 16551 ip->ip_tos &= ~IPTOS_ECN_MASK; 16552 ip->ip_tos |= ect; 16553 } 16554 } 16555 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16556 #ifdef INET6 16557 if (rack->r_is_v6) { 16558 if (tp->t_port) { 16559 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16560 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16561 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16562 th->th_sum = htons(0); 16563 UDPSTAT_INC(udps_opackets); 16564 } else { 16565 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16566 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16567 th->th_sum = in6_cksum_pseudo(ip6, 16568 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16569 0); 16570 } 16571 } 16572 #endif 16573 #if defined(INET6) && defined(INET) 16574 else 16575 #endif 16576 #ifdef INET 16577 { 16578 if (tp->t_port) { 16579 m->m_pkthdr.csum_flags = CSUM_UDP; 16580 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16581 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16582 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16583 th->th_sum = htons(0); 16584 UDPSTAT_INC(udps_opackets); 16585 } else { 16586 m->m_pkthdr.csum_flags = CSUM_TCP; 16587 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16588 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16589 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16590 IPPROTO_TCP + len + optlen)); 16591 } 16592 /* IP version must be set here for ipv4/ipv6 checking later */ 16593 KASSERT(ip->ip_v == IPVERSION, 16594 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16595 } 16596 #endif 16597 if (tso) { 16598 KASSERT(len > tp->t_maxseg - optlen, 16599 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16600 m->m_pkthdr.csum_flags |= CSUM_TSO; 16601 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16602 } 16603 #ifdef INET6 16604 if (rack->r_is_v6) { 16605 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16606 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16607 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16608 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16609 else 16610 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16611 } 16612 #endif 16613 #if defined(INET) && defined(INET6) 16614 else 16615 #endif 16616 #ifdef INET 16617 { 16618 ip->ip_len = htons(m->m_pkthdr.len); 16619 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16620 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16621 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16622 if (tp->t_port == 0 || len < V_tcp_minmss) { 16623 ip->ip_off |= htons(IP_DF); 16624 } 16625 } else { 16626 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16627 } 16628 } 16629 #endif 16630 /* Time to copy 
in our header */ 16631 cpto = mtod(m, uint8_t *); 16632 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16633 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16634 if (optlen) { 16635 bcopy(opt, th + 1, optlen); 16636 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16637 } else { 16638 th->th_off = sizeof(struct tcphdr) >> 2; 16639 } 16640 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16641 union tcp_log_stackspecific log; 16642 16643 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16644 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16645 if (rack->rack_no_prr) 16646 log.u_bbr.flex1 = 0; 16647 else 16648 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16649 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16650 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16651 log.u_bbr.flex4 = max_val; 16652 log.u_bbr.flex5 = 0; 16653 /* Save off the early/late values */ 16654 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16655 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16656 log.u_bbr.bw_inuse = rack_get_bw(rack); 16657 log.u_bbr.flex8 = 0; 16658 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16659 log.u_bbr.flex7 = 44; 16660 log.u_bbr.pkts_out = tp->t_maxseg; 16661 log.u_bbr.timeStamp = cts; 16662 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16663 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16664 log.u_bbr.delivered = 0; 16665 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16666 len, &log, false, NULL, NULL, 0, tv); 16667 } else 16668 lgb = NULL; 16669 #ifdef INET6 16670 if (rack->r_is_v6) { 16671 error = ip6_output(m, NULL, 16672 &inp->inp_route6, 16673 0, NULL, NULL, inp); 16674 } 16675 #endif 16676 #if defined(INET) && defined(INET6) 16677 else 16678 #endif 16679 #ifdef INET 16680 { 16681 error = ip_output(m, NULL, 16682 &inp->inp_route, 16683 0, 0, inp); 16684 } 16685 #endif 16686 if (lgb) { 16687 lgb->tlb_errno = error; 16688 lgb = NULL; 16689 } 16690 if (error) { 16691 *send_err = error; 16692 m = NULL; 16693 goto failed; 16694 } 16695 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16696 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16697 m = NULL; 16698 if (tp->snd_una == tp->snd_max) { 16699 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16700 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16701 tp->t_acktime = ticks; 16702 } 16703 if (error == 0) 16704 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16705 16706 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16707 tot_len += len; 16708 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16709 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16710 tp->snd_max += len; 16711 tp->snd_nxt = tp->snd_max; 16712 { 16713 int idx; 16714 16715 idx = (len / segsiz) + 3; 16716 if (idx >= TCP_MSS_ACCT_ATIMER) 16717 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16718 else 16719 counter_u64_add(rack_out_size[idx], 1); 16720 } 16721 if (len <= rack->r_ctl.fsb.left_to_send) 16722 rack->r_ctl.fsb.left_to_send -= len; 16723 else 16724 rack->r_ctl.fsb.left_to_send = 0; 16725 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16726 rack->r_fast_output = 0; 16727 rack->r_ctl.fsb.left_to_send = 0; 16728 /* At the end of fast_output scale up the sb */ 16729 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16730 rack_sndbuf_autoscale(rack); 16731 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16732 } 16733 if (tp->t_rtttime == 0) { 16734 
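		/*
		 * No RTT sample is currently being timed (t_rtttime of 0), so
		 * start timing this transmission; t_rtseq records which
		 * sequence number the sample belongs to.
		 */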
tp->t_rtttime = ticks; 16735 tp->t_rtseq = startseq; 16736 KMOD_TCPSTAT_INC(tcps_segstimed); 16737 } 16738 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16739 (max_val > len) && 16740 (tso == 0)) { 16741 max_val -= len; 16742 len = segsiz; 16743 th = rack->r_ctl.fsb.th; 16744 cnt_thru++; 16745 goto again; 16746 } 16747 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16748 counter_u64_add(rack_fto_send, 1); 16749 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16750 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16751 #ifdef TCP_ACCOUNTING 16752 crtsc = get_cyclecount(); 16753 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16754 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16755 } 16756 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16757 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16758 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16759 } 16760 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16761 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16762 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16763 } 16764 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16765 sched_unpin(); 16766 #endif 16767 return (0); 16768 failed: 16769 if (m) 16770 m_free(m); 16771 rack->r_fast_output = 0; 16772 return (-1); 16773 } 16774 16775 static int 16776 rack_output(struct tcpcb *tp) 16777 { 16778 struct socket *so; 16779 uint32_t recwin; 16780 uint32_t sb_offset, s_moff = 0; 16781 int32_t len, error = 0; 16782 uint16_t flags; 16783 struct mbuf *m, *s_mb = NULL; 16784 struct mbuf *mb; 16785 uint32_t if_hw_tsomaxsegcount = 0; 16786 uint32_t if_hw_tsomaxsegsize; 16787 int32_t segsiz, minseg; 16788 long tot_len_this_send = 0; 16789 #ifdef INET 16790 struct ip *ip = NULL; 16791 #endif 16792 #ifdef TCPDEBUG 16793 struct ipovly *ipov = NULL; 16794 #endif 16795 struct udphdr *udp = NULL; 16796 struct tcp_rack *rack; 16797 struct tcphdr *th; 16798 uint8_t pass = 0; 16799 uint8_t mark = 0; 16800 uint8_t wanted_cookie = 0; 16801 u_char opt[TCP_MAXOLEN]; 16802 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16803 uint32_t rack_seq; 16804 16805 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16806 unsigned ipsec_optlen = 0; 16807 16808 #endif 16809 int32_t idle, sendalot; 16810 int32_t sub_from_prr = 0; 16811 volatile int32_t sack_rxmit; 16812 struct rack_sendmap *rsm = NULL; 16813 int32_t tso, mtu; 16814 struct tcpopt to; 16815 int32_t slot = 0; 16816 int32_t sup_rack = 0; 16817 uint32_t cts, ms_cts, delayed, early; 16818 uint16_t add_flag = RACK_SENT_SP; 16819 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16820 uint8_t hpts_calling, doing_tlp = 0; 16821 uint32_t cwnd_to_use, pace_max_seg; 16822 int32_t do_a_prefetch = 0; 16823 int32_t prefetch_rsm = 0; 16824 int32_t orig_len = 0; 16825 struct timeval tv; 16826 int32_t prefetch_so_done = 0; 16827 struct tcp_log_buffer *lgb; 16828 struct inpcb *inp; 16829 struct sockbuf *sb; 16830 uint64_t ts_val = 0; 16831 #ifdef TCP_ACCOUNTING 16832 uint64_t crtsc; 16833 #endif 16834 #ifdef INET6 16835 struct ip6_hdr *ip6 = NULL; 16836 int32_t isipv6; 16837 #endif 16838 uint8_t filled_all = 0; 16839 bool hw_tls = false; 16840 16841 /* setup and take the cache hits here */ 16842 rack = (struct tcp_rack *)tp->t_fb_ptr; 16843 #ifdef TCP_ACCOUNTING 16844 sched_pin(); 16845 ts_val = get_cyclecount(); 16846 #endif 16847 hpts_calling = rack->rc_inp->inp_hpts_calls; 16848 NET_EPOCH_ASSERT(); 16849 INP_WLOCK_ASSERT(rack->rc_inp); 16850 #ifdef TCP_OFFLOAD 16851 if 
(tp->t_flags & TF_TOE) { 16852 #ifdef TCP_ACCOUNTING 16853 sched_unpin(); 16854 #endif 16855 return (tcp_offload_output(tp)); 16856 } 16857 #endif 16858 /* 16859 * For TFO connections in SYN_RECEIVED, only allow the initial 16860 * SYN|ACK and those sent by the retransmit timer. 16861 */ 16862 if (IS_FASTOPEN(tp->t_flags) && 16863 (tp->t_state == TCPS_SYN_RECEIVED) && 16864 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16865 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16866 #ifdef TCP_ACCOUNTING 16867 sched_unpin(); 16868 #endif 16869 return (0); 16870 } 16871 #ifdef INET6 16872 if (rack->r_state) { 16873 /* Use the cache line loaded if possible */ 16874 isipv6 = rack->r_is_v6; 16875 } else { 16876 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16877 } 16878 #endif 16879 early = 0; 16880 cts = tcp_get_usecs(&tv); 16881 ms_cts = tcp_tv_to_mssectick(&tv); 16882 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16883 tcp_in_hpts(rack->rc_inp)) { 16884 /* 16885 * We are on the hpts for some timer but not hptsi output. 16886 * Remove from the hpts unconditionally. 16887 */ 16888 rack_timer_cancel(tp, rack, cts, __LINE__); 16889 } 16890 /* Are we pacing and late? */ 16891 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16892 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16893 /* We are delayed */ 16894 delayed = cts - rack->r_ctl.rc_last_output_to; 16895 } else { 16896 delayed = 0; 16897 } 16898 /* Do the timers, which may override the pacer */ 16899 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16900 int retval; 16901 16902 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16903 &doing_tlp); 16904 if (retval != 0) { 16905 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16906 #ifdef TCP_ACCOUNTING 16907 sched_unpin(); 16908 #endif 16909 /* 16910 * If timers want tcp_drop(), then pass error out, 16911 * otherwise suppress it. 16912 */ 16913 return (retval < 0 ? retval : 0); 16914 } 16915 } 16916 if (rack->rc_in_persist) { 16917 if (tcp_in_hpts(rack->rc_inp) == 0) { 16918 /* Timer is not running */ 16919 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16920 } 16921 #ifdef TCP_ACCOUNTING 16922 sched_unpin(); 16923 #endif 16924 return (0); 16925 } 16926 if ((rack->r_timer_override) || 16927 (rack->rc_ack_can_sendout_data) || 16928 (delayed) || 16929 (tp->t_state < TCPS_ESTABLISHED)) { 16930 rack->rc_ack_can_sendout_data = 0; 16931 if (tcp_in_hpts(rack->rc_inp)) 16932 tcp_hpts_remove(rack->rc_inp); 16933 } else if (tcp_in_hpts(rack->rc_inp)) { 16934 /* 16935 * On the hpts you can't pass even if ACKNOW is on, we will 16936 * when the hpts fires. 
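		 * Returning here without sending keeps the pacing schedule
		 * intact; the skipped opportunity is charged to SND_BLOCKED
		 * and TCP_MSS_ACCT_INPACE just below.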
16937 */ 16938 #ifdef TCP_ACCOUNTING 16939 crtsc = get_cyclecount(); 16940 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16941 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16942 } 16943 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16944 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16945 tp->tcp_cnt_counters[SND_BLOCKED]++; 16946 } 16947 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16948 sched_unpin(); 16949 #endif 16950 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16951 return (0); 16952 } 16953 rack->rc_inp->inp_hpts_calls = 0; 16954 /* Finish out both pacing early and late accounting */ 16955 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16956 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16957 early = rack->r_ctl.rc_last_output_to - cts; 16958 } else 16959 early = 0; 16960 if (delayed) { 16961 rack->r_ctl.rc_agg_delayed += delayed; 16962 rack->r_late = 1; 16963 } else if (early) { 16964 rack->r_ctl.rc_agg_early += early; 16965 rack->r_early = 1; 16966 } 16967 /* Now that early/late accounting is done turn off the flag */ 16968 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16969 rack->r_wanted_output = 0; 16970 rack->r_timer_override = 0; 16971 if ((tp->t_state != rack->r_state) && 16972 TCPS_HAVEESTABLISHED(tp->t_state)) { 16973 rack_set_state(tp, rack); 16974 } 16975 if ((rack->r_fast_output) && 16976 (doing_tlp == 0) && 16977 (tp->rcv_numsacks == 0)) { 16978 int ret; 16979 16980 error = 0; 16981 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16982 if (ret >= 0) 16983 return(ret); 16984 else if (error) { 16985 inp = rack->rc_inp; 16986 so = inp->inp_socket; 16987 sb = &so->so_snd; 16988 goto nomore; 16989 } 16990 } 16991 inp = rack->rc_inp; 16992 /* 16993 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16994 * only allow the initial SYN or SYN|ACK and those sent 16995 * by the retransmit timer. 16996 */ 16997 if (IS_FASTOPEN(tp->t_flags) && 16998 ((tp->t_state == TCPS_SYN_RECEIVED) || 16999 (tp->t_state == TCPS_SYN_SENT)) && 17000 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 17001 (tp->t_rxtshift == 0)) { /* not a retransmit */ 17002 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 17003 so = inp->inp_socket; 17004 sb = &so->so_snd; 17005 goto just_return_nolock; 17006 } 17007 /* 17008 * Determine length of data that should be transmitted, and flags 17009 * that will be used. If there is some data or critical controls 17010 * (SYN, RST) to send, then transmit; otherwise, investigate 17011 * further. 
17012 */ 17013 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 17014 if (tp->t_idle_reduce) { 17015 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 17016 rack_cc_after_idle(rack, tp); 17017 } 17018 tp->t_flags &= ~TF_LASTIDLE; 17019 if (idle) { 17020 if (tp->t_flags & TF_MORETOCOME) { 17021 tp->t_flags |= TF_LASTIDLE; 17022 idle = 0; 17023 } 17024 } 17025 if ((tp->snd_una == tp->snd_max) && 17026 rack->r_ctl.rc_went_idle_time && 17027 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 17028 idle = cts - rack->r_ctl.rc_went_idle_time; 17029 if (idle > rack_min_probertt_hold) { 17030 /* Count as a probe rtt */ 17031 if (rack->in_probe_rtt == 0) { 17032 rack->r_ctl.rc_lower_rtt_us_cts = cts; 17033 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 17034 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 17035 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 17036 } else { 17037 rack_exit_probertt(rack, cts); 17038 } 17039 } 17040 idle = 0; 17041 } 17042 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 17043 rack_init_fsb_block(tp, rack); 17044 again: 17045 /* 17046 * If we've recently taken a timeout, snd_max will be greater than 17047 * snd_nxt. There may be SACK information that allows us to avoid 17048 * resending already delivered data. Adjust snd_nxt accordingly. 17049 */ 17050 sendalot = 0; 17051 cts = tcp_get_usecs(&tv); 17052 ms_cts = tcp_tv_to_mssectick(&tv); 17053 tso = 0; 17054 mtu = 0; 17055 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 17056 minseg = segsiz; 17057 if (rack->r_ctl.rc_pace_max_segs == 0) 17058 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 17059 else 17060 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 17061 sb_offset = tp->snd_max - tp->snd_una; 17062 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 17063 flags = tcp_outflags[tp->t_state]; 17064 while (rack->rc_free_cnt < rack_free_cache) { 17065 rsm = rack_alloc(rack); 17066 if (rsm == NULL) { 17067 if (inp->inp_hpts_calls) 17068 /* Retry in a ms */ 17069 slot = (1 * HPTS_USEC_IN_MSEC); 17070 so = inp->inp_socket; 17071 sb = &so->so_snd; 17072 goto just_return_nolock; 17073 } 17074 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 17075 rack->rc_free_cnt++; 17076 rsm = NULL; 17077 } 17078 if (inp->inp_hpts_calls) 17079 inp->inp_hpts_calls = 0; 17080 sack_rxmit = 0; 17081 len = 0; 17082 rsm = NULL; 17083 if (flags & TH_RST) { 17084 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 17085 so = inp->inp_socket; 17086 sb = &so->so_snd; 17087 goto send; 17088 } 17089 if (rack->r_ctl.rc_resend) { 17090 /* Retransmit timer */ 17091 rsm = rack->r_ctl.rc_resend; 17092 rack->r_ctl.rc_resend = NULL; 17093 len = rsm->r_end - rsm->r_start; 17094 sack_rxmit = 1; 17095 sendalot = 0; 17096 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 17097 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 17098 __func__, __LINE__, 17099 rsm->r_start, tp->snd_una, tp, rack, rsm)); 17100 sb_offset = rsm->r_start - tp->snd_una; 17101 if (len >= segsiz) 17102 len = segsiz; 17103 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 17104 /* We have a retransmit that takes precedence */ 17105 if ((!IN_FASTRECOVERY(tp->t_flags)) && 17106 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 17107 /* Enter recovery if not induced by a time-out */ 17108 rack->r_ctl.rc_rsm_start = rsm->r_start; 17109 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 17110 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 17111 
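			/*
			 * The snapshots above record where this recovery
			 * episode begins; rack_cong_signal() then has the
			 * congestion-control module react as it would to
			 * three duplicate ACKs (CC_NDUPACK), entering fast
			 * recovery.
			 */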
			rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
		}
#ifdef INVARIANTS
		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
			panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
			      tp, rack, rsm, rsm->r_start, tp->snd_una);
		}
#endif
		len = rsm->r_end - rsm->r_start;
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
			 __func__, __LINE__,
			 rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
		sendalot = 0;
		if (len >= segsiz)
			len = segsiz;
		if (len > 0) {
			sack_rxmit = 1;
			KMOD_TCPSTAT_INC(tcps_sack_rexmits);
			KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
			    min(len, segsiz));
			counter_u64_add(rack_rtm_prr_retran, 1);
		}
	} else if (rack->r_ctl.rc_tlpsend) {
		/* Tail loss probe */
		long cwin;
		long tlen;

		/*
		 * Check if we can do a TLP with a RACK'd packet
		 * this can happen if we are not doing the rack
		 * cheat and we skipped to a TLP and it
		 * went off.
		 */
		rsm = rack->r_ctl.rc_tlpsend;
		/* We are doing a TLP, make sure the flag is present */
		rsm->r_flags |= RACK_TLP;
		rack->r_ctl.rc_tlpsend = NULL;
		sack_rxmit = 1;
		tlen = rsm->r_end - rsm->r_start;
		if (tlen > segsiz)
			tlen = segsiz;
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
			 __func__, __LINE__,
			 rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
		cwin = min(tp->snd_wnd, tlen);
		len = cwin;
	}
	if (rack->r_must_retran &&
	    (doing_tlp == 0) &&
	    (rsm == NULL)) {
		/*
		 * Non-Sack and we had a RTO or Sack/non-Sack and a
		 * MTU change, we need to retransmit until we reach
		 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto).
		 */
		if (SEQ_GT(tp->snd_max, tp->snd_una)) {
			int sendwin, flight;

			sendwin = min(tp->snd_wnd, tp->snd_cwnd);
			flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
			if (flight >= sendwin) {
				so = inp->inp_socket;
				sb = &so->so_snd;
				goto just_return_nolock;
			}
			rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
			if (rsm == NULL) {
				/* TSNH */
				rack->r_must_retran = 0;
				rack->r_ctl.rc_out_at_rto = 0;
				so = inp->inp_socket;
				sb = &so->so_snd;
				goto just_return_nolock;
			}
			if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
				/* It does not have the flag, we are done */
				rack->r_must_retran = 0;
				rack->r_ctl.rc_out_at_rto = 0;
			} else {
				sack_rxmit = 1;
				len = rsm->r_end - rsm->r_start;
				sendalot = 0;
				sb_offset = rsm->r_start - tp->snd_una;
				if (len >= segsiz)
					len = segsiz;
				/*
				 * Delay removing the flag RACK_MUST_RXT so
				 * that the fastpath for retransmit will
				 * work with this rsm.
				 */
			}
		} else {
			/* We must be done if there is nothing outstanding */
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
		}
	}
	/*
	 * Enforce a connection sendmap count limit if set
	 * as long as we are not retransmitting.
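	 * Retransmissions (rsm != NULL) always pass; only new sends are
	 * held back once rc_num_maps_alloced reaches
	 * V_tcp_map_entries_limit, since each new send may require
	 * another sendmap entry.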
17217 */ 17218 if ((rsm == NULL) && 17219 (rack->do_detection == 0) && 17220 (V_tcp_map_entries_limit > 0) && 17221 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 17222 counter_u64_add(rack_to_alloc_limited, 1); 17223 if (!rack->alloc_limit_reported) { 17224 rack->alloc_limit_reported = 1; 17225 counter_u64_add(rack_alloc_limited_conns, 1); 17226 } 17227 so = inp->inp_socket; 17228 sb = &so->so_snd; 17229 goto just_return_nolock; 17230 } 17231 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 17232 /* we are retransmitting the fin */ 17233 len--; 17234 if (len) { 17235 /* 17236 * When retransmitting data do *not* include the 17237 * FIN. This could happen from a TLP probe. 17238 */ 17239 flags &= ~TH_FIN; 17240 } 17241 } 17242 #ifdef INVARIANTS 17243 /* For debugging */ 17244 rack->r_ctl.rc_rsm_at_retran = rsm; 17245 #endif 17246 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 17247 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 17248 int ret; 17249 17250 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 17251 if (ret == 0) 17252 return (0); 17253 } 17254 if (rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17255 /* 17256 * Clear the flag in prep for the send 17257 * note that if we can't get an mbuf 17258 * and fail, we won't retransmit this 17259 * rsm but that should be ok (its rare). 17260 */ 17261 rsm->r_flags &= ~RACK_MUST_RXT; 17262 } 17263 so = inp->inp_socket; 17264 sb = &so->so_snd; 17265 if (do_a_prefetch == 0) { 17266 kern_prefetch(sb, &do_a_prefetch); 17267 do_a_prefetch = 1; 17268 } 17269 #ifdef NETFLIX_SHARED_CWND 17270 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17271 rack->rack_enable_scwnd) { 17272 /* We are doing cwnd sharing */ 17273 if (rack->gp_ready && 17274 (rack->rack_attempted_scwnd == 0) && 17275 (rack->r_ctl.rc_scw == NULL) && 17276 tp->t_lib) { 17277 /* The pcbid is in, lets make an attempt */ 17278 counter_u64_add(rack_try_scwnd, 1); 17279 rack->rack_attempted_scwnd = 1; 17280 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17281 &rack->r_ctl.rc_scw_index, 17282 segsiz); 17283 } 17284 if (rack->r_ctl.rc_scw && 17285 (rack->rack_scwnd_is_idle == 1) && 17286 sbavail(&so->so_snd)) { 17287 /* we are no longer out of data */ 17288 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17289 rack->rack_scwnd_is_idle = 0; 17290 } 17291 if (rack->r_ctl.rc_scw) { 17292 /* First lets update and get the cwnd */ 17293 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17294 rack->r_ctl.rc_scw_index, 17295 tp->snd_cwnd, tp->snd_wnd, segsiz); 17296 } 17297 } 17298 #endif 17299 /* 17300 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17301 * state flags. 17302 */ 17303 if (tp->t_flags & TF_NEEDFIN) 17304 flags |= TH_FIN; 17305 if (tp->t_flags & TF_NEEDSYN) 17306 flags |= TH_SYN; 17307 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17308 void *end_rsm; 17309 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17310 if (end_rsm) 17311 kern_prefetch(end_rsm, &prefetch_rsm); 17312 prefetch_rsm = 1; 17313 } 17314 SOCKBUF_LOCK(sb); 17315 /* 17316 * If snd_nxt == snd_max and we have transmitted a FIN, the 17317 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17318 * negative length. This can also occur when TCP opens up its 17319 * congestion window while receiving additional duplicate acks after 17320 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17321 * the fast-retransmit. 
17322 * 17323 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17324 * set to snd_una, the sb_offset will be 0, and the length may wind 17325 * up 0. 17326 * 17327 * If sack_rxmit is true we are retransmitting from the scoreboard 17328 * in which case len is already set. 17329 */ 17330 if ((sack_rxmit == 0) && 17331 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17332 uint32_t avail; 17333 17334 avail = sbavail(sb); 17335 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17336 sb_offset = tp->snd_nxt - tp->snd_una; 17337 else 17338 sb_offset = 0; 17339 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17340 if (rack->r_ctl.rc_tlp_new_data) { 17341 /* TLP is forcing out new data */ 17342 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17343 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17344 } 17345 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17346 if (tp->snd_wnd > sb_offset) 17347 len = tp->snd_wnd - sb_offset; 17348 else 17349 len = 0; 17350 } else { 17351 len = rack->r_ctl.rc_tlp_new_data; 17352 } 17353 rack->r_ctl.rc_tlp_new_data = 0; 17354 } else { 17355 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17356 } 17357 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17358 /* 17359 * For prr=off, we need to send only 1 MSS 17360 * at a time. We do this because another sack could 17361 * be arriving that causes us to send retransmits and 17362 * we don't want to be on a long pace due to a larger send 17363 * that keeps us from sending out the retransmit. 17364 */ 17365 len = segsiz; 17366 } 17367 } else { 17368 uint32_t outstanding; 17369 /* 17370 * We are inside of a Fast recovery episode, this 17371 * is caused by a SACK or 3 dup acks. At this point 17372 * we have sent all the retransmissions and we rely 17373 * on PRR to dictate what we will send in the form of 17374 * new data. 17375 */ 17376 17377 outstanding = tp->snd_max - tp->snd_una; 17378 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17379 if (tp->snd_wnd > outstanding) { 17380 len = tp->snd_wnd - outstanding; 17381 /* Check to see if we have the data */ 17382 if ((sb_offset + len) > avail) { 17383 /* It does not all fit */ 17384 if (avail > sb_offset) 17385 len = avail - sb_offset; 17386 else 17387 len = 0; 17388 } 17389 } else { 17390 len = 0; 17391 } 17392 } else if (avail > sb_offset) { 17393 len = avail - sb_offset; 17394 } else { 17395 len = 0; 17396 } 17397 if (len > 0) { 17398 if (len > rack->r_ctl.rc_prr_sndcnt) { 17399 len = rack->r_ctl.rc_prr_sndcnt; 17400 } 17401 if (len > 0) { 17402 sub_from_prr = 1; 17403 counter_u64_add(rack_rtm_prr_newdata, 1); 17404 } 17405 } 17406 if (len > segsiz) { 17407 /* 17408 * We should never send more than a MSS when 17409 * retransmitting or sending new data in prr 17410 * mode unless the override flag is on. Most 17411 * likely the PRR algorithm is not going to 17412 * let us send a lot as well :-) 17413 */ 17414 if (rack->r_ctl.rc_prr_sendalot == 0) { 17415 len = segsiz; 17416 } 17417 } else if (len < segsiz) { 17418 /* 17419 * Do we send any? The idea here is if the 17420 * send empty's the socket buffer we want to 17421 * do it. However if not then lets just wait 17422 * for our prr_sndcnt to get bigger. 
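				 * For example, with segsiz 1448 and
				 * prr_sndcnt 500, the 500 bytes go out only
				 * if they are the last bytes left in the
				 * socket buffer; otherwise we hold off until
				 * PRR credits at least a full segment.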
17423 */ 17424 long leftinsb; 17425 17426 leftinsb = sbavail(sb) - sb_offset; 17427 if (leftinsb > len) { 17428 /* This send does not empty the sb */ 17429 len = 0; 17430 } 17431 } 17432 } 17433 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17434 /* 17435 * If you have not established 17436 * and are not doing FAST OPEN 17437 * no data please. 17438 */ 17439 if ((sack_rxmit == 0) && 17440 (!IS_FASTOPEN(tp->t_flags))){ 17441 len = 0; 17442 sb_offset = 0; 17443 } 17444 } 17445 if (prefetch_so_done == 0) { 17446 kern_prefetch(so, &prefetch_so_done); 17447 prefetch_so_done = 1; 17448 } 17449 /* 17450 * Lop off SYN bit if it has already been sent. However, if this is 17451 * SYN-SENT state and if segment contains data and if we don't know 17452 * that foreign host supports TAO, suppress sending segment. 17453 */ 17454 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17455 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17456 /* 17457 * When sending additional segments following a TFO SYN|ACK, 17458 * do not include the SYN bit. 17459 */ 17460 if (IS_FASTOPEN(tp->t_flags) && 17461 (tp->t_state == TCPS_SYN_RECEIVED)) 17462 flags &= ~TH_SYN; 17463 } 17464 /* 17465 * Be careful not to send data and/or FIN on SYN segments. This 17466 * measure is needed to prevent interoperability problems with not 17467 * fully conformant TCP implementations. 17468 */ 17469 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17470 len = 0; 17471 flags &= ~TH_FIN; 17472 } 17473 /* 17474 * On TFO sockets, ensure no data is sent in the following cases: 17475 * 17476 * - When retransmitting SYN|ACK on a passively-created socket 17477 * 17478 * - When retransmitting SYN on an actively created socket 17479 * 17480 * - When sending a zero-length cookie (cookie request) on an 17481 * actively created socket 17482 * 17483 * - When the socket is in the CLOSED state (RST is being sent) 17484 */ 17485 if (IS_FASTOPEN(tp->t_flags) && 17486 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17487 ((tp->t_state == TCPS_SYN_SENT) && 17488 (tp->t_tfo_client_cookie_len == 0)) || 17489 (flags & TH_RST))) { 17490 sack_rxmit = 0; 17491 len = 0; 17492 } 17493 /* Without fast-open there should never be data sent on a SYN */ 17494 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17495 tp->snd_nxt = tp->iss; 17496 len = 0; 17497 } 17498 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17499 /* We only send 1 MSS if we have a DSACK block */ 17500 add_flag |= RACK_SENT_W_DSACK; 17501 len = segsiz; 17502 } 17503 orig_len = len; 17504 if (len <= 0) { 17505 /* 17506 * If FIN has been sent but not acked, but we haven't been 17507 * called to retransmit, len will be < 0. Otherwise, window 17508 * shrank after we sent into it. If window shrank to 0, 17509 * cancel pending retransmit, pull snd_nxt back to (closed) 17510 * window, and set the persist timer if it isn't already 17511 * going. If the window didn't close completely, just wait 17512 * for an ACK. 17513 * 17514 * We also do a general check here to ensure that we will 17515 * set the persist timer when we have data to send, but a 17516 * 0-byte window. This makes sure the persist timer is set 17517 * even if the packet hits one of the "goto send" lines 17518 * below. 
17519 */ 17520 len = 0; 17521 if ((tp->snd_wnd == 0) && 17522 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17523 (tp->snd_una == tp->snd_max) && 17524 (sb_offset < (int)sbavail(sb))) { 17525 rack_enter_persist(tp, rack, cts); 17526 } 17527 } else if ((rsm == NULL) && 17528 (doing_tlp == 0) && 17529 (len < pace_max_seg)) { 17530 /* 17531 * We are not sending a maximum sized segment for 17532 * some reason. Should we not send anything (think 17533 * sws or persists)? 17534 */ 17535 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17536 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17537 (len < minseg) && 17538 (len < (int)(sbavail(sb) - sb_offset))) { 17539 /* 17540 * Here the rwnd is less than 17541 * the minimum pacing size, this is not a retransmit, 17542 * we are established and 17543 * the send is not the last in the socket buffer 17544 * we send nothing, and we may enter persists 17545 * if nothing is outstanding. 17546 */ 17547 len = 0; 17548 if (tp->snd_max == tp->snd_una) { 17549 /* 17550 * Nothing out we can 17551 * go into persists. 17552 */ 17553 rack_enter_persist(tp, rack, cts); 17554 } 17555 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17556 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17557 (len < (int)(sbavail(sb) - sb_offset)) && 17558 (len < minseg)) { 17559 /* 17560 * Here we are not retransmitting, and 17561 * the cwnd is not so small that we could 17562 * not send at least a min size (rxt timer 17563 * not having gone off), We have 2 segments or 17564 * more already in flight, its not the tail end 17565 * of the socket buffer and the cwnd is blocking 17566 * us from sending out a minimum pacing segment size. 17567 * Lets not send anything. 17568 */ 17569 len = 0; 17570 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17571 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17572 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17573 (len < (int)(sbavail(sb) - sb_offset)) && 17574 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17575 /* 17576 * Here we have a send window but we have 17577 * filled it up and we can't send another pacing segment. 17578 * We also have in flight more than 2 segments 17579 * and we are not completing the sb i.e. we allow 17580 * the last bytes of the sb to go out even if 17581 * its not a full pacing segment. 17582 */ 17583 len = 0; 17584 } else if ((rack->r_ctl.crte != NULL) && 17585 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17586 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17587 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17588 (len < (int)(sbavail(sb) - sb_offset))) { 17589 /* 17590 * Here we are doing hardware pacing, this is not a TLP, 17591 * we are not sending a pace max segment size, there is rwnd 17592 * room to send at least N pace_max_seg, the cwnd is greater 17593 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17594 * more segments in flight and its not the tail of the socket buffer. 17595 * 17596 * We don't want to send instead we need to get more ack's in to 17597 * allow us to send a full pacing segment. Normally, if we are pacing 17598 * about the right speed, we should have finished our pacing 17599 * send as most of the acks have come back if we are at the 17600 * right rate. This is a bit fuzzy since return path delay 17601 * can delay the acks, which is why we want to make sure we 17602 * have cwnd space to have a bit more than a max pace segments in flight. 
17603 * 17604 * If we have not gotten our acks back we are pacing at too high a 17605 * rate delaying will not hurt and will bring our GP estimate down by 17606 * injecting the delay. If we don't do this we will send 17607 * 2 MSS out in response to the acks being clocked in which 17608 * defeats the point of hw-pacing (i.e. to help us get 17609 * larger TSO's out). 17610 */ 17611 len = 0; 17612 17613 } 17614 17615 } 17616 /* len will be >= 0 after this point. */ 17617 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17618 rack_sndbuf_autoscale(rack); 17619 /* 17620 * Decide if we can use TCP Segmentation Offloading (if supported by 17621 * hardware). 17622 * 17623 * TSO may only be used if we are in a pure bulk sending state. The 17624 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17625 * options prevent using TSO. With TSO the TCP header is the same 17626 * (except for the sequence number) for all generated packets. This 17627 * makes it impossible to transmit any options which vary per 17628 * generated segment or packet. 17629 * 17630 * IPv4 handling has a clear separation of ip options and ip header 17631 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17632 * the right thing below to provide length of just ip options and thus 17633 * checking for ipoptlen is enough to decide if ip options are present. 17634 */ 17635 ipoptlen = 0; 17636 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17637 /* 17638 * Pre-calculate here as we save another lookup into the darknesses 17639 * of IPsec that way and can actually decide if TSO is ok. 17640 */ 17641 #ifdef INET6 17642 if (isipv6 && IPSEC_ENABLED(ipv6)) 17643 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 17644 #ifdef INET 17645 else 17646 #endif 17647 #endif /* INET6 */ 17648 #ifdef INET 17649 if (IPSEC_ENABLED(ipv4)) 17650 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 17651 #endif /* INET */ 17652 #endif 17653 17654 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17655 ipoptlen += ipsec_optlen; 17656 #endif 17657 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17658 (tp->t_port == 0) && 17659 ((tp->t_flags & TF_SIGNATURE) == 0) && 17660 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17661 ipoptlen == 0) 17662 tso = 1; 17663 { 17664 uint32_t outstanding; 17665 17666 outstanding = tp->snd_max - tp->snd_una; 17667 if (tp->t_flags & TF_SENTFIN) { 17668 /* 17669 * If we sent a fin, snd_max is 1 higher than 17670 * snd_una 17671 */ 17672 outstanding--; 17673 } 17674 if (sack_rxmit) { 17675 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17676 flags &= ~TH_FIN; 17677 } else { 17678 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17679 sbused(sb))) 17680 flags &= ~TH_FIN; 17681 } 17682 } 17683 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17684 (long)TCP_MAXWIN << tp->rcv_scale); 17685 17686 /* 17687 * Sender silly window avoidance. We transmit under the following 17688 * conditions when len is non-zero: 17689 * 17690 * - We have a full segment (or more with TSO) - This is the last 17691 * buffer in a write()/send() and we are either idle or running 17692 * NODELAY - we've timed out (e.g. persist timer) - we have more 17693 * then 1/2 the maximum send window's worth of data (receiver may be 17694 * limited the window size) - we need to retransmit 17695 */ 17696 if (len) { 17697 if (len >= segsiz) { 17698 goto send; 17699 } 17700 /* 17701 * NOTE! 
on localhost connections an 'ack' from the remote 17702 * end may occur synchronously with the output and cause us 17703 * to flush a buffer queued with moretocome. XXX 17704 * 17705 */ 17706 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17707 (idle || (tp->t_flags & TF_NODELAY)) && 17708 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17709 (tp->t_flags & TF_NOPUSH) == 0) { 17710 pass = 2; 17711 goto send; 17712 } 17713 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17714 pass = 22; 17715 goto send; 17716 } 17717 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17718 pass = 4; 17719 goto send; 17720 } 17721 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17722 pass = 5; 17723 goto send; 17724 } 17725 if (sack_rxmit) { 17726 pass = 6; 17727 goto send; 17728 } 17729 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17730 (ctf_outstanding(tp) < (segsiz * 2))) { 17731 /* 17732 * We have less than two MSS outstanding (delayed ack) 17733 * and our rwnd will not let us send a full sized 17734 * MSS. Lets go ahead and let this small segment 17735 * out because we want to try to have at least two 17736 * packets inflight to not be caught by delayed ack. 17737 */ 17738 pass = 12; 17739 goto send; 17740 } 17741 } 17742 /* 17743 * Sending of standalone window updates. 17744 * 17745 * Window updates are important when we close our window due to a 17746 * full socket buffer and are opening it again after the application 17747 * reads data from it. Once the window has opened again and the 17748 * remote end starts to send again the ACK clock takes over and 17749 * provides the most current window information. 17750 * 17751 * We must avoid the silly window syndrome whereas every read from 17752 * the receive buffer, no matter how small, causes a window update 17753 * to be sent. We also should avoid sending a flurry of window 17754 * updates when the socket buffer had queued a lot of data and the 17755 * application is doing small reads. 17756 * 17757 * Prevent a flurry of pointless window updates by only sending an 17758 * update when we can increase the advertized window by more than 17759 * 1/4th of the socket buffer capacity. When the buffer is getting 17760 * full or is very small be more aggressive and send an update 17761 * whenever we can increase by two mss sized segments. In all other 17762 * situations the ACK's to new incoming data will carry further 17763 * window increases. 17764 * 17765 * Don't send an independent window update if a delayed ACK is 17766 * pending (it will get piggy-backed on it) or the remote side 17767 * already has done a half-close and won't send more data. Skip 17768 * this if the connection is in T/TCP half-open state. 17769 */ 17770 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17771 !(tp->t_flags & TF_DELACK) && 17772 !TCPS_HAVERCVDFIN(tp->t_state)) { 17773 /* 17774 * "adv" is the amount we could increase the window, taking 17775 * into account that we are limited by TCP_MAXWIN << 17776 * tp->rcv_scale. 17777 */ 17778 int32_t adv; 17779 int oldwin; 17780 17781 adv = recwin; 17782 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17783 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17784 if (adv > oldwin) 17785 adv -= oldwin; 17786 else { 17787 /* We can't increase the window */ 17788 adv = 0; 17789 } 17790 } else 17791 oldwin = 0; 17792 17793 /* 17794 * If the new window size ends up being the same as or less 17795 * than the old size when it is scaled, then don't force 17796 * a window update. 
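		 * For instance, with a window scale of 7 the advertised
		 * field moves in 128-byte steps, so an increase that does
		 * not cross a 128-byte boundary changes nothing on the wire.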
17797 */ 17798 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17799 goto dontupdate; 17800 17801 if (adv >= (int32_t)(2 * segsiz) && 17802 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17803 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17804 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17805 pass = 7; 17806 goto send; 17807 } 17808 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17809 pass = 23; 17810 goto send; 17811 } 17812 } 17813 dontupdate: 17814 17815 /* 17816 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17817 * is also a catch-all for the retransmit timer timeout case. 17818 */ 17819 if (tp->t_flags & TF_ACKNOW) { 17820 pass = 8; 17821 goto send; 17822 } 17823 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17824 pass = 9; 17825 goto send; 17826 } 17827 /* 17828 * If our state indicates that FIN should be sent and we have not 17829 * yet done so, then we need to send. 17830 */ 17831 if ((flags & TH_FIN) && 17832 (tp->snd_nxt == tp->snd_una)) { 17833 pass = 11; 17834 goto send; 17835 } 17836 /* 17837 * No reason to send a segment, just return. 17838 */ 17839 just_return: 17840 SOCKBUF_UNLOCK(sb); 17841 just_return_nolock: 17842 { 17843 int app_limited = CTF_JR_SENT_DATA; 17844 17845 if (tot_len_this_send > 0) { 17846 /* Make sure snd_nxt is up to max */ 17847 rack->r_ctl.fsb.recwin = recwin; 17848 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17849 if ((error == 0) && 17850 rack_use_rfo && 17851 ((flags & (TH_SYN|TH_FIN)) == 0) && 17852 (ipoptlen == 0) && 17853 (tp->snd_nxt == tp->snd_max) && 17854 (tp->rcv_numsacks == 0) && 17855 rack->r_fsb_inited && 17856 TCPS_HAVEESTABLISHED(tp->t_state) && 17857 (rack->r_must_retran == 0) && 17858 ((tp->t_flags & TF_NEEDFIN) == 0) && 17859 (len > 0) && (orig_len > 0) && 17860 (orig_len > len) && 17861 ((orig_len - len) >= segsiz) && 17862 ((optlen == 0) || 17863 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17864 /* We can send at least one more MSS using our fsb */ 17865 17866 rack->r_fast_output = 1; 17867 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17868 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17869 rack->r_ctl.fsb.tcp_flags = flags; 17870 rack->r_ctl.fsb.left_to_send = orig_len - len; 17871 if (hw_tls) 17872 rack->r_ctl.fsb.hw_tls = 1; 17873 else 17874 rack->r_ctl.fsb.hw_tls = 0; 17875 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17876 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17877 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17878 (tp->snd_max - tp->snd_una))); 17879 if (rack->r_ctl.fsb.left_to_send < segsiz) 17880 rack->r_fast_output = 0; 17881 else { 17882 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17883 rack->r_ctl.fsb.rfo_apply_push = 1; 17884 else 17885 rack->r_ctl.fsb.rfo_apply_push = 0; 17886 } 17887 } else 17888 rack->r_fast_output = 0; 17889 17890 17891 rack_log_fsb(rack, tp, so, flags, 17892 ipoptlen, orig_len, len, 0, 17893 1, optlen, __LINE__, 1); 17894 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17895 tp->snd_nxt = tp->snd_max; 17896 } else { 17897 int end_window = 0; 17898 uint32_t seq = tp->gput_ack; 17899 17900 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17901 if (rsm) { 17902 /* 17903 * Mark the last sent that we just-returned (hinting 17904 * that delayed ack may play a role in any rtt measurement). 
				 */
				rsm->r_just_ret = 1;
			}
			counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
			rack->r_ctl.rc_agg_delayed = 0;
			rack->r_early = 0;
			rack->r_late = 0;
			rack->r_ctl.rc_agg_early = 0;
			if ((ctf_outstanding(tp) +
			     min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
				 minseg)) >= tp->snd_wnd) {
				/* We are limited by the rwnd */
				app_limited = CTF_JR_RWND_LIMITED;
				if (IN_FASTRECOVERY(tp->t_flags))
					rack->r_ctl.rc_prr_sndcnt = 0;
			} else if (ctf_outstanding(tp) >= sbavail(sb)) {
				/* We are limited by what's available -- app limited */
				app_limited = CTF_JR_APP_LIMITED;
				if (IN_FASTRECOVERY(tp->t_flags))
					rack->r_ctl.rc_prr_sndcnt = 0;
			} else if ((idle == 0) &&
				   ((tp->t_flags & TF_NODELAY) == 0) &&
				   ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
				   (len < segsiz)) {
				/*
				 * TF_NODELAY is not set and the user is
				 * sending less than 1 MSS. This brings out
				 * SWS avoidance so we don't send. Another
				 * app-limited case.
				 */
				app_limited = CTF_JR_APP_LIMITED;
			} else if (tp->t_flags & TF_NOPUSH) {
				/*
				 * The user has requested no push of
				 * the last segment and we are
				 * at the last segment. Another app
				 * limited case.
				 */
				app_limited = CTF_JR_APP_LIMITED;
			} else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
				/* It's the cwnd */
				app_limited = CTF_JR_CWND_LIMITED;
			} else if (IN_FASTRECOVERY(tp->t_flags) &&
				   (rack->rack_no_prr == 0) &&
				   (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
				app_limited = CTF_JR_PRR;
			} else {
				/* Now why here are we not sending? */
#ifdef NOW
#ifdef INVARIANTS
				panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
#endif
#endif
				app_limited = CTF_JR_ASSESSING;
			}
			/*
			 * App limited in some fashion, for our pacing GP
			 * measurements we don't want any gap (even cwnd).
			 * Close down the measurement window.
			 */
			if (rack_cwnd_block_ends_measure &&
			    ((app_limited == CTF_JR_CWND_LIMITED) ||
			     (app_limited == CTF_JR_PRR))) {
				/*
				 * The reason we are not sending is
				 * the cwnd (or prr). We have been configured
				 * to end the measurement window in
				 * this case.
				 */
				end_window = 1;
			} else if (rack_rwnd_block_ends_measure &&
				   (app_limited == CTF_JR_RWND_LIMITED)) {
				/*
				 * We are rwnd limited and have been
				 * configured to end the measurement
				 * window in this case.
				 */
				end_window = 1;
			} else if (app_limited == CTF_JR_APP_LIMITED) {
				/*
				 * A true application limited period, we have
				 * run out of data.
				 */
				end_window = 1;
			} else if (app_limited == CTF_JR_ASSESSING) {
				/*
				 * In the assessing case we hit the end of
				 * the if/else and had no known reason.
				 * This will panic us under invariants.
				 *
				 * If we get this out in logs we need to
				 * investigate which reason we missed.
				 */
				end_window = 1;
			}
			if (end_window) {
				uint8_t log = 0;

				/* Adjust the Gput measurement */
				if ((tp->t_flags & TF_GPUTINPROG) &&
				    SEQ_GT(tp->gput_ack, tp->snd_max)) {
					tp->gput_ack = tp->snd_max;
					if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
						/*
						 * There is not enough to measure.
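						 * A goodput sample needs at
						 * least MIN_GP_WIN segments
						 * worth of data between
						 * gput_seq and gput_ack;
						 * anything smaller is abandoned
						 * rather than folded into the
						 * estimate.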
18010 */ 18011 tp->t_flags &= ~TF_GPUTINPROG; 18012 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 18013 rack->r_ctl.rc_gp_srtt /*flex1*/, 18014 tp->gput_seq, 18015 0, 0, 18, __LINE__, NULL, 0); 18016 } else 18017 log = 1; 18018 } 18019 /* Mark the last packet has app limited */ 18020 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 18021 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 18022 if (rack->r_ctl.rc_app_limited_cnt == 0) 18023 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 18024 else { 18025 /* 18026 * Go out to the end app limited and mark 18027 * this new one as next and move the end_appl up 18028 * to this guy. 18029 */ 18030 if (rack->r_ctl.rc_end_appl) 18031 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 18032 rack->r_ctl.rc_end_appl = rsm; 18033 } 18034 rsm->r_flags |= RACK_APP_LIMITED; 18035 rack->r_ctl.rc_app_limited_cnt++; 18036 } 18037 if (log) 18038 rack_log_pacing_delay_calc(rack, 18039 rack->r_ctl.rc_app_limited_cnt, seq, 18040 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 18041 } 18042 } 18043 if (slot) { 18044 /* set the rack tcb into the slot N */ 18045 counter_u64_add(rack_paced_segments, 1); 18046 } else if (tot_len_this_send) { 18047 counter_u64_add(rack_unpaced_segments, 1); 18048 } 18049 /* Check if we need to go into persists or not */ 18050 if ((tp->snd_max == tp->snd_una) && 18051 TCPS_HAVEESTABLISHED(tp->t_state) && 18052 sbavail(sb) && 18053 (sbavail(sb) > tp->snd_wnd) && 18054 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 18055 /* Yes lets make sure to move to persist before timer-start */ 18056 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 18057 } 18058 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 18059 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 18060 } 18061 #ifdef NETFLIX_SHARED_CWND 18062 if ((sbavail(sb) == 0) && 18063 rack->r_ctl.rc_scw) { 18064 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 18065 rack->rack_scwnd_is_idle = 1; 18066 } 18067 #endif 18068 #ifdef TCP_ACCOUNTING 18069 if (tot_len_this_send > 0) { 18070 crtsc = get_cyclecount(); 18071 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18072 tp->tcp_cnt_counters[SND_OUT_DATA]++; 18073 } 18074 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 18075 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18076 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 18077 } 18078 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 18079 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18080 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 18081 } 18082 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 18083 } else { 18084 crtsc = get_cyclecount(); 18085 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18086 tp->tcp_cnt_counters[SND_LIMITED]++; 18087 } 18088 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 18089 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18090 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 18091 } 18092 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 18093 } 18094 sched_unpin(); 18095 #endif 18096 return (0); 18097 18098 send: 18099 if (rsm || sack_rxmit) 18100 counter_u64_add(rack_nfto_resend, 1); 18101 else 18102 counter_u64_add(rack_non_fto_send, 1); 18103 if ((flags & TH_FIN) && 18104 sbavail(sb)) { 18105 /* 18106 * We do not transmit a FIN 18107 * with data outstanding. 
We 18108 * need to make it so all data 18109 * is acked first. 18110 */ 18111 flags &= ~TH_FIN; 18112 } 18113 /* Enforce stack imposed max seg size if we have one */ 18114 if (rack->r_ctl.rc_pace_max_segs && 18115 (len > rack->r_ctl.rc_pace_max_segs)) { 18116 mark = 1; 18117 len = rack->r_ctl.rc_pace_max_segs; 18118 } 18119 SOCKBUF_LOCK_ASSERT(sb); 18120 if (len > 0) { 18121 if (len >= segsiz) 18122 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 18123 else 18124 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 18125 } 18126 /* 18127 * Before ESTABLISHED, force sending of initial options unless TCP 18128 * set not to do any options. NOTE: we assume that the IP/TCP header 18129 * plus TCP options always fit in a single mbuf, leaving room for a 18130 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 18131 * + optlen <= MCLBYTES 18132 */ 18133 optlen = 0; 18134 #ifdef INET6 18135 if (isipv6) 18136 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18137 else 18138 #endif 18139 hdrlen = sizeof(struct tcpiphdr); 18140 18141 /* 18142 * Compute options for segment. We only have to care about SYN and 18143 * established connection segments. Options for SYN-ACK segments 18144 * are handled in TCP syncache. 18145 */ 18146 to.to_flags = 0; 18147 if ((tp->t_flags & TF_NOOPT) == 0) { 18148 /* Maximum segment size. */ 18149 if (flags & TH_SYN) { 18150 tp->snd_nxt = tp->iss; 18151 to.to_mss = tcp_mssopt(&inp->inp_inc); 18152 if (tp->t_port) 18153 to.to_mss -= V_tcp_udp_tunneling_overhead; 18154 to.to_flags |= TOF_MSS; 18155 18156 /* 18157 * On SYN or SYN|ACK transmits on TFO connections, 18158 * only include the TFO option if it is not a 18159 * retransmit, as the presence of the TFO option may 18160 * have caused the original SYN or SYN|ACK to have 18161 * been dropped by a middlebox. 18162 */ 18163 if (IS_FASTOPEN(tp->t_flags) && 18164 (tp->t_rxtshift == 0)) { 18165 if (tp->t_state == TCPS_SYN_RECEIVED) { 18166 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 18167 to.to_tfo_cookie = 18168 (u_int8_t *)&tp->t_tfo_cookie.server; 18169 to.to_flags |= TOF_FASTOPEN; 18170 wanted_cookie = 1; 18171 } else if (tp->t_state == TCPS_SYN_SENT) { 18172 to.to_tfo_len = 18173 tp->t_tfo_client_cookie_len; 18174 to.to_tfo_cookie = 18175 tp->t_tfo_cookie.client; 18176 to.to_flags |= TOF_FASTOPEN; 18177 wanted_cookie = 1; 18178 /* 18179 * If we wind up having more data to 18180 * send with the SYN than can fit in 18181 * one segment, don't send any more 18182 * until the SYN|ACK comes back from 18183 * the other end. 18184 */ 18185 sendalot = 0; 18186 } 18187 } 18188 } 18189 /* Window scaling. */ 18190 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 18191 to.to_wscale = tp->request_r_scale; 18192 to.to_flags |= TOF_SCALE; 18193 } 18194 /* Timestamps. */ 18195 if ((tp->t_flags & TF_RCVD_TSTMP) || 18196 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 18197 to.to_tsval = ms_cts + tp->ts_offset; 18198 to.to_tsecr = tp->ts_recent; 18199 to.to_flags |= TOF_TS; 18200 } 18201 /* Set receive buffer autosizing timestamp. */ 18202 if (tp->rfbuf_ts == 0 && 18203 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 18204 tp->rfbuf_ts = tcp_ts_getticks(); 18205 /* Selective ACK's. 
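		 * On a SYN we only advertise SACK permission; once the
		 * connection is established any queued SACK blocks
		 * (tp->sackblks) are attached so the peer learns which of
		 * its segments arrived out of order.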
*/ 18206 if (tp->t_flags & TF_SACK_PERMIT) { 18207 if (flags & TH_SYN) 18208 to.to_flags |= TOF_SACKPERM; 18209 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 18210 tp->rcv_numsacks > 0) { 18211 to.to_flags |= TOF_SACK; 18212 to.to_nsacks = tp->rcv_numsacks; 18213 to.to_sacks = (u_char *)tp->sackblks; 18214 } 18215 } 18216 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18217 /* TCP-MD5 (RFC2385). */ 18218 if (tp->t_flags & TF_SIGNATURE) 18219 to.to_flags |= TOF_SIGNATURE; 18220 #endif /* TCP_SIGNATURE */ 18221 18222 /* Processing the options. */ 18223 hdrlen += optlen = tcp_addoptions(&to, opt); 18224 /* 18225 * If we wanted a TFO option to be added, but it was unable 18226 * to fit, ensure no data is sent. 18227 */ 18228 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 18229 !(to.to_flags & TOF_FASTOPEN)) 18230 len = 0; 18231 } 18232 if (tp->t_port) { 18233 if (V_tcp_udp_tunneling_port == 0) { 18234 /* The port was removed?? */ 18235 SOCKBUF_UNLOCK(&so->so_snd); 18236 #ifdef TCP_ACCOUNTING 18237 crtsc = get_cyclecount(); 18238 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18239 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18240 } 18241 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18242 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18243 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18244 } 18245 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18246 sched_unpin(); 18247 #endif 18248 return (EHOSTUNREACH); 18249 } 18250 hdrlen += sizeof(struct udphdr); 18251 } 18252 #ifdef INET6 18253 if (isipv6) 18254 ipoptlen = ip6_optlen(tp->t_inpcb); 18255 else 18256 #endif 18257 if (tp->t_inpcb->inp_options) 18258 ipoptlen = tp->t_inpcb->inp_options->m_len - 18259 offsetof(struct ipoption, ipopt_list); 18260 else 18261 ipoptlen = 0; 18262 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18263 ipoptlen += ipsec_optlen; 18264 #endif 18265 18266 /* 18267 * Adjust data length if insertion of options will bump the packet 18268 * length beyond the t_maxseg length. Clear the FIN bit because we 18269 * cut off the tail of the segment. 18270 */ 18271 if (len + optlen + ipoptlen > tp->t_maxseg) { 18272 if (tso) { 18273 uint32_t if_hw_tsomax; 18274 uint32_t moff; 18275 int32_t max_len; 18276 18277 /* extract TSO information */ 18278 if_hw_tsomax = tp->t_tsomax; 18279 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18280 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18281 KASSERT(ipoptlen == 0, 18282 ("%s: TSO can't do IP options", __func__)); 18283 18284 /* 18285 * Check if we should limit by maximum payload 18286 * length: 18287 */ 18288 if (if_hw_tsomax != 0) { 18289 /* compute maximum TSO length */ 18290 max_len = (if_hw_tsomax - hdrlen - 18291 max_linkhdr); 18292 if (max_len <= 0) { 18293 len = 0; 18294 } else if (len > max_len) { 18295 sendalot = 1; 18296 len = max_len; 18297 mark = 2; 18298 } 18299 } 18300 /* 18301 * Prevent the last segment from being fractional 18302 * unless the send sockbuf can be emptied: 18303 */ 18304 max_len = (tp->t_maxseg - optlen); 18305 if ((sb_offset + len) < sbavail(sb)) { 18306 moff = len % (u_int)max_len; 18307 if (moff != 0) { 18308 mark = 3; 18309 len -= moff; 18310 } 18311 } 18312 /* 18313 * In case there are too many small fragments don't 18314 * use TSO: 18315 */ 18316 if (len <= segsiz) { 18317 mark = 4; 18318 tso = 0; 18319 } 18320 /* 18321 * Send the FIN in a separate segment after the bulk 18322 * sending is done. We don't trust the TSO 18323 * implementations to clear the FIN flag on all but 18324 * the last segment. 
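			 * A pending FIN (TF_NEEDFIN) sets sendalot, which
			 * requests another pass through the output path so
			 * the FIN goes out on its own once the bulk data has
			 * been sent.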
18325 */ 18326 if (tp->t_flags & TF_NEEDFIN) { 18327 sendalot = 4; 18328 } 18329 } else { 18330 mark = 5; 18331 if (optlen + ipoptlen >= tp->t_maxseg) { 18332 /* 18333 * Since we don't have enough space to put 18334 * the IP header chain and the TCP header in 18335 * one packet as required by RFC 7112, don't 18336 * send it. Also ensure that at least one 18337 * byte of the payload can be put into the 18338 * TCP segment. 18339 */ 18340 SOCKBUF_UNLOCK(&so->so_snd); 18341 error = EMSGSIZE; 18342 sack_rxmit = 0; 18343 goto out; 18344 } 18345 len = tp->t_maxseg - optlen - ipoptlen; 18346 sendalot = 5; 18347 } 18348 } else { 18349 tso = 0; 18350 mark = 6; 18351 } 18352 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18353 ("%s: len > IP_MAXPACKET", __func__)); 18354 #ifdef DIAGNOSTIC 18355 #ifdef INET6 18356 if (max_linkhdr + hdrlen > MCLBYTES) 18357 #else 18358 if (max_linkhdr + hdrlen > MHLEN) 18359 #endif 18360 panic("tcphdr too big"); 18361 #endif 18362 18363 /* 18364 * This KASSERT is here to catch edge cases at a well defined place. 18365 * Before, those had triggered (random) panic conditions further 18366 * down. 18367 */ 18368 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18369 if ((len == 0) && 18370 (flags & TH_FIN) && 18371 (sbused(sb))) { 18372 /* 18373 * We have outstanding data, don't send a fin by itself!. 18374 */ 18375 goto just_return; 18376 } 18377 /* 18378 * Grab a header mbuf, attaching a copy of data to be transmitted, 18379 * and initialize the header from the template for sends on this 18380 * connection. 18381 */ 18382 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18383 if (len) { 18384 uint32_t max_val; 18385 uint32_t moff; 18386 18387 if (rack->r_ctl.rc_pace_max_segs) 18388 max_val = rack->r_ctl.rc_pace_max_segs; 18389 else if (rack->rc_user_set_max_segs) 18390 max_val = rack->rc_user_set_max_segs * segsiz; 18391 else 18392 max_val = len; 18393 /* 18394 * We allow a limit on sending with hptsi. 18395 */ 18396 if (len > max_val) { 18397 mark = 7; 18398 len = max_val; 18399 } 18400 #ifdef INET6 18401 if (MHLEN < hdrlen + max_linkhdr) 18402 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18403 else 18404 #endif 18405 m = m_gethdr(M_NOWAIT, MT_DATA); 18406 18407 if (m == NULL) { 18408 SOCKBUF_UNLOCK(sb); 18409 error = ENOBUFS; 18410 sack_rxmit = 0; 18411 goto out; 18412 } 18413 m->m_data += max_linkhdr; 18414 m->m_len = hdrlen; 18415 18416 /* 18417 * Start the m_copy functions from the closest mbuf to the 18418 * sb_offset in the socket buffer chain. 18419 */ 18420 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18421 s_mb = mb; 18422 s_moff = moff; 18423 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18424 m_copydata(mb, moff, (int)len, 18425 mtod(m, caddr_t)+hdrlen); 18426 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18427 sbsndptr_adv(sb, mb, len); 18428 m->m_len += len; 18429 } else { 18430 struct sockbuf *msb; 18431 18432 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18433 msb = NULL; 18434 else 18435 msb = sb; 18436 m->m_next = tcp_m_copym( 18437 mb, moff, &len, 18438 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18439 ((rsm == NULL) ? hw_tls : 0) 18440 #ifdef NETFLIX_COPY_ARGS 18441 , &filled_all 18442 #endif 18443 ); 18444 if (len <= (tp->t_maxseg - optlen)) { 18445 /* 18446 * Must have ran out of mbufs for the copy 18447 * shorten it to no longer need tso. Lets 18448 * not put on sendalot since we are low on 18449 * mbufs. 
18450 */ 18451 tso = 0; 18452 } 18453 if (m->m_next == NULL) { 18454 SOCKBUF_UNLOCK(sb); 18455 (void)m_free(m); 18456 error = ENOBUFS; 18457 sack_rxmit = 0; 18458 goto out; 18459 } 18460 } 18461 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18462 if (rsm && (rsm->r_flags & RACK_TLP)) { 18463 /* 18464 * TLP should not count in retran count, but 18465 * in its own bin 18466 */ 18467 counter_u64_add(rack_tlp_retran, 1); 18468 counter_u64_add(rack_tlp_retran_bytes, len); 18469 } else { 18470 tp->t_sndrexmitpack++; 18471 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18472 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18473 } 18474 #ifdef STATS 18475 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18476 len); 18477 #endif 18478 } else { 18479 KMOD_TCPSTAT_INC(tcps_sndpack); 18480 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18481 #ifdef STATS 18482 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18483 len); 18484 #endif 18485 } 18486 /* 18487 * If we're sending everything we've got, set PUSH. (This 18488 * will keep happy those implementations which only give 18489 * data to the user when a buffer fills or a PUSH comes in.) 18490 */ 18491 if (sb_offset + len == sbused(sb) && 18492 sbused(sb) && 18493 !(flags & TH_SYN)) { 18494 flags |= TH_PUSH; 18495 add_flag |= RACK_HAD_PUSH; 18496 } 18497 18498 SOCKBUF_UNLOCK(sb); 18499 } else { 18500 SOCKBUF_UNLOCK(sb); 18501 if (tp->t_flags & TF_ACKNOW) 18502 KMOD_TCPSTAT_INC(tcps_sndacks); 18503 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18504 KMOD_TCPSTAT_INC(tcps_sndctrl); 18505 else 18506 KMOD_TCPSTAT_INC(tcps_sndwinup); 18507 18508 m = m_gethdr(M_NOWAIT, MT_DATA); 18509 if (m == NULL) { 18510 error = ENOBUFS; 18511 sack_rxmit = 0; 18512 goto out; 18513 } 18514 #ifdef INET6 18515 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18516 MHLEN >= hdrlen) { 18517 M_ALIGN(m, hdrlen); 18518 } else 18519 #endif 18520 m->m_data += max_linkhdr; 18521 m->m_len = hdrlen; 18522 } 18523 SOCKBUF_UNLOCK_ASSERT(sb); 18524 m->m_pkthdr.rcvif = (struct ifnet *)0; 18525 #ifdef MAC 18526 mac_inpcb_create_mbuf(inp, m); 18527 #endif 18528 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18529 #ifdef INET6 18530 if (isipv6) 18531 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18532 else 18533 #endif /* INET6 */ 18534 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18535 th = rack->r_ctl.fsb.th; 18536 udp = rack->r_ctl.fsb.udp; 18537 if (udp) { 18538 #ifdef INET6 18539 if (isipv6) 18540 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18541 else 18542 #endif /* INET6 */ 18543 ulen = hdrlen + len - sizeof(struct ip); 18544 udp->uh_ulen = htons(ulen); 18545 } 18546 } else { 18547 #ifdef INET6 18548 if (isipv6) { 18549 ip6 = mtod(m, struct ip6_hdr *); 18550 if (tp->t_port) { 18551 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18552 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18553 udp->uh_dport = tp->t_port; 18554 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18555 udp->uh_ulen = htons(ulen); 18556 th = (struct tcphdr *)(udp + 1); 18557 } else 18558 th = (struct tcphdr *)(ip6 + 1); 18559 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18560 } else 18561 #endif /* INET6 */ 18562 { 18563 ip = mtod(m, struct ip *); 18564 #ifdef TCPDEBUG 18565 ipov = (struct ipovly *)ip; 18566 #endif 18567 if (tp->t_port) { 18568 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18569 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18570 udp->uh_dport = tp->t_port; 18571 ulen = hdrlen + len - sizeof(struct ip); 18572 udp->uh_ulen = htons(ulen); 
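				/*
				 * uh_ulen counts everything after the IP
				 * header: the UDP header itself plus the TCP
				 * header, options and payload. The TCP header
				 * is placed immediately after the UDP header.
				 */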
18573 th = (struct tcphdr *)(udp + 1); 18574 } else 18575 th = (struct tcphdr *)(ip + 1); 18576 tcpip_fillheaders(inp, tp->t_port, ip, th); 18577 } 18578 } 18579 /* 18580 * Fill in fields, remembering maximum advertised window for use in 18581 * delaying messages about window sizes. If resending a FIN, be sure 18582 * not to use a new sequence number. 18583 */ 18584 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18585 tp->snd_nxt == tp->snd_max) 18586 tp->snd_nxt--; 18587 /* 18588 * If we are starting a connection, send ECN setup SYN packet. If we 18589 * are on a retransmit, we may resend those bits a number of times 18590 * as per RFC 3168. 18591 */ 18592 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18593 flags |= tcp_ecn_output_syn_sent(tp); 18594 } 18595 /* Also handle parallel SYN for ECN */ 18596 if (TCPS_HAVERCVDSYN(tp->t_state) && 18597 (tp->t_flags2 & TF2_ECN_PERMIT)) { 18598 int ect = tcp_ecn_output_established(tp, &flags, len); 18599 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18600 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18601 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18602 #ifdef INET6 18603 if (isipv6) { 18604 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18605 ip6->ip6_flow |= htonl(ect << 20); 18606 } 18607 else 18608 #endif 18609 { 18610 ip->ip_tos &= ~IPTOS_ECN_MASK; 18611 ip->ip_tos |= ect; 18612 } 18613 } 18614 /* 18615 * If we are doing retransmissions, then snd_nxt will not reflect 18616 * the first unsent octet. For ACK only packets, we do not want the 18617 * sequence number of the retransmitted packet, we want the sequence 18618 * number of the next unsent octet. So, if there is no data (and no 18619 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18620 * ti_seq. But if we are in persist state, snd_max might reflect 18621 * one byte beyond the right edge of the window, so use snd_nxt in 18622 * that case, since we know we aren't doing a retransmission. 18623 * (retransmit and persist are mutually exclusive...) 18624 */ 18625 if (sack_rxmit == 0) { 18626 if (len || (flags & (TH_SYN | TH_FIN))) { 18627 th->th_seq = htonl(tp->snd_nxt); 18628 rack_seq = tp->snd_nxt; 18629 } else { 18630 th->th_seq = htonl(tp->snd_max); 18631 rack_seq = tp->snd_max; 18632 } 18633 } else { 18634 th->th_seq = htonl(rsm->r_start); 18635 rack_seq = rsm->r_start; 18636 } 18637 th->th_ack = htonl(tp->rcv_nxt); 18638 tcp_set_flags(th, flags); 18639 /* 18640 * Calculate receive window. Don't shrink window, but avoid silly 18641 * window syndrome. 18642 * If a RST segment is sent, advertise a window of zero. 18643 */ 18644 if (flags & TH_RST) { 18645 recwin = 0; 18646 } else { 18647 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18648 recwin < (long)segsiz) { 18649 recwin = 0; 18650 } 18651 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18652 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18653 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18654 } 18655 18656 /* 18657 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18658 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18659 * handled in syncache. 18660 */ 18661 if (flags & TH_SYN) 18662 th->th_win = htons((u_short) 18663 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18664 else { 18665 /* Avoid shrinking window with window scaling. */ 18666 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18667 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18668 } 18669 /* 18670 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18671 * window. This may cause the remote transmitter to stall. 
	 * This flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer. This can occur if the receiver is
	 * attempting to read more data than can be buffered prior to
	 * transmitting on the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
	/* Are we using the fsb? If so, copy the template data to the mbuf. */
	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
		uint8_t *cpto;

		cpto = mtod(m, uint8_t *);
		memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
		/*
		 * We have just copied in:
		 * IP/IP6
		 * <optional udphdr>
		 * tcphdr (no options)
		 *
		 * We need to grab the correct pointers into the mbuf
		 * for both the tcp header, and possibly the udp header (if tunneling).
		 * We do this by using the offset in the copy buffer and adding it
		 * to the mbuf base pointer (cpto).
		 */
#ifdef INET6
		if (isipv6)
			ip6 = mtod(m, struct ip6_hdr *);
		else
#endif				/* INET6 */
			ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
		/* If we have a udp header, let's set it into the mbuf as well. */
		if (udp)
			udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			goto out;
		}
	}
#endif
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	}
	/*
	 * Put TCP length in extended header, and then checksum extended
	 * header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
	if (isipv6) {
		/*
		 * ip6_plen does not need to be filled now, and will be filled
		 * in ip6_output.
18743 */ 18744 if (tp->t_port) { 18745 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18746 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18747 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18748 th->th_sum = htons(0); 18749 UDPSTAT_INC(udps_opackets); 18750 } else { 18751 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18752 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18753 th->th_sum = in6_cksum_pseudo(ip6, 18754 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18755 0); 18756 } 18757 } 18758 #endif 18759 #if defined(INET6) && defined(INET) 18760 else 18761 #endif 18762 #ifdef INET 18763 { 18764 if (tp->t_port) { 18765 m->m_pkthdr.csum_flags = CSUM_UDP; 18766 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18767 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18768 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18769 th->th_sum = htons(0); 18770 UDPSTAT_INC(udps_opackets); 18771 } else { 18772 m->m_pkthdr.csum_flags = CSUM_TCP; 18773 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18774 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18775 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18776 IPPROTO_TCP + len + optlen)); 18777 } 18778 /* IP version must be set here for ipv4/ipv6 checking later */ 18779 KASSERT(ip->ip_v == IPVERSION, 18780 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18781 } 18782 #endif 18783 /* 18784 * Enable TSO and specify the size of the segments. The TCP pseudo 18785 * header checksum is always provided. XXX: Fixme: This is currently 18786 * not the case for IPv6. 18787 */ 18788 if (tso) { 18789 KASSERT(len > tp->t_maxseg - optlen, 18790 ("%s: len <= tso_segsz", __func__)); 18791 m->m_pkthdr.csum_flags |= CSUM_TSO; 18792 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18793 } 18794 KASSERT(len + hdrlen == m_length(m, NULL), 18795 ("%s: mbuf chain different than expected: %d + %u != %u", 18796 __func__, len, hdrlen, m_length(m, NULL))); 18797 18798 #ifdef TCP_HHOOK 18799 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18800 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18801 #endif 18802 /* We're getting ready to send; log now. 
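	 * (The block below queues a TCP_LOG_OUT record that carries the
	 * pacing state -- the PRR send count, pace_min/max_segs, the
	 * early/late pacing accounting and the current bandwidth estimate --
	 * so the black box log can reconstruct this transmit decision.)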
*/ 18803 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18804 union tcp_log_stackspecific log; 18805 18806 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18807 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18808 if (rack->rack_no_prr) 18809 log.u_bbr.flex1 = 0; 18810 else 18811 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18812 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18813 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18814 log.u_bbr.flex4 = orig_len; 18815 if (filled_all) 18816 log.u_bbr.flex5 = 0x80000000; 18817 else 18818 log.u_bbr.flex5 = 0; 18819 /* Save off the early/late values */ 18820 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18821 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18822 log.u_bbr.bw_inuse = rack_get_bw(rack); 18823 if (rsm || sack_rxmit) { 18824 if (doing_tlp) 18825 log.u_bbr.flex8 = 2; 18826 else 18827 log.u_bbr.flex8 = 1; 18828 } else { 18829 if (doing_tlp) 18830 log.u_bbr.flex8 = 3; 18831 else 18832 log.u_bbr.flex8 = 0; 18833 } 18834 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18835 log.u_bbr.flex7 = mark; 18836 log.u_bbr.flex7 <<= 8; 18837 log.u_bbr.flex7 |= pass; 18838 log.u_bbr.pkts_out = tp->t_maxseg; 18839 log.u_bbr.timeStamp = cts; 18840 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18841 log.u_bbr.lt_epoch = cwnd_to_use; 18842 log.u_bbr.delivered = sendalot; 18843 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18844 len, &log, false, NULL, NULL, 0, &tv); 18845 } else 18846 lgb = NULL; 18847 18848 /* 18849 * Fill in IP length and desired time to live and send to IP level. 18850 * There should be a better way to handle ttl and tos; we could keep 18851 * them in the template, but need a way to checksum without them. 18852 */ 18853 /* 18854 * m->m_pkthdr.len should have been set before cksum calcuration, 18855 * because in6_cksum() need it. 18856 */ 18857 #ifdef INET6 18858 if (isipv6) { 18859 /* 18860 * we separately set hoplimit for every segment, since the 18861 * user might want to change the value via setsockopt. Also, 18862 * desired default hop limit might be changed via Neighbor 18863 * Discovery. 18864 */ 18865 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18866 18867 /* 18868 * Set the packet size here for the benefit of DTrace 18869 * probes. ip6_output() will set it properly; it's supposed 18870 * to include the option header lengths as well. 18871 */ 18872 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18873 18874 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18875 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18876 else 18877 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18878 18879 if (tp->t_state == TCPS_SYN_SENT) 18880 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18881 18882 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18883 /* TODO: IPv6 IP6TOS_ECT bit on */ 18884 error = ip6_output(m, 18885 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18886 inp->in6p_outputopts, 18887 #else 18888 NULL, 18889 #endif 18890 &inp->inp_route6, 18891 ((rsm || sack_rxmit) ? 
		    IP_NO_SND_TAG_RL : 0),
		    NULL, NULL, inp);

		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
			mtu = inp->inp_route6.ro_nh->nh_mtu;
	}
#endif				/* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif				/* INET6 */
		rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
		/*
		 * If we do path MTU discovery, then we set DF on every
		 * packet. This might not be the best thing to do according
		 * to RFC3390 Section 2. However, the tcp hostcache mitigates
		 * the problem so it affects only the first tcp connection
		 * with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe
		 * fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			}
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

		error = ip_output(m,
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
		    inp->inp_options,
#else
		    NULL,
#endif
		    &inp->inp_route,
		    ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
		    inp);
		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
			mtu = inp->inp_route.ro_nh->nh_mtu;
	}
#endif				/* INET */

out:
	if (lgb) {
		lgb->tlb_errno = error;
		lgb = NULL;
	}
	/*
	 * In transmit state, time the transmission and arrange for the
	 * retransmit. In persist state, just set snd_max.
18955 */ 18956 if (error == 0) { 18957 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18958 if (rsm && doing_tlp) { 18959 rack->rc_last_sent_tlp_past_cumack = 0; 18960 rack->rc_last_sent_tlp_seq_valid = 1; 18961 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18962 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18963 } 18964 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18965 if (rsm && (doing_tlp == 0)) { 18966 /* Set we retransmitted */ 18967 rack->rc_gp_saw_rec = 1; 18968 } else { 18969 if (cwnd_to_use > tp->snd_ssthresh) { 18970 /* Set we sent in CA */ 18971 rack->rc_gp_saw_ca = 1; 18972 } else { 18973 /* Set we sent in SS */ 18974 rack->rc_gp_saw_ss = 1; 18975 } 18976 } 18977 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18978 (tp->t_flags & TF_SACK_PERMIT) && 18979 tp->rcv_numsacks > 0) 18980 tcp_clean_dsack_blocks(tp); 18981 tot_len_this_send += len; 18982 if (len == 0) 18983 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18984 else if (len == 1) { 18985 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18986 } else if (len > 1) { 18987 int idx; 18988 18989 idx = (len / segsiz) + 3; 18990 if (idx >= TCP_MSS_ACCT_ATIMER) 18991 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18992 else 18993 counter_u64_add(rack_out_size[idx], 1); 18994 } 18995 } 18996 if ((rack->rack_no_prr == 0) && 18997 sub_from_prr && 18998 (error == 0)) { 18999 if (rack->r_ctl.rc_prr_sndcnt >= len) 19000 rack->r_ctl.rc_prr_sndcnt -= len; 19001 else 19002 rack->r_ctl.rc_prr_sndcnt = 0; 19003 } 19004 sub_from_prr = 0; 19005 if (doing_tlp) { 19006 /* Make sure the TLP is added */ 19007 add_flag |= RACK_TLP; 19008 } else if (rsm) { 19009 /* If its a resend without TLP then it must not have the flag */ 19010 rsm->r_flags &= ~RACK_TLP; 19011 } 19012 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 19013 rack_to_usec_ts(&tv), 19014 rsm, add_flag, s_mb, s_moff, hw_tls); 19015 19016 19017 if ((error == 0) && 19018 (len > 0) && 19019 (tp->snd_una == tp->snd_max)) 19020 rack->r_ctl.rc_tlp_rxt_last_time = cts; 19021 { 19022 tcp_seq startseq = tp->snd_nxt; 19023 19024 /* Track our lost count */ 19025 if (rsm && (doing_tlp == 0)) 19026 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 19027 /* 19028 * Advance snd_nxt over sequence space of this segment. 19029 */ 19030 if (error) 19031 /* We don't log or do anything with errors */ 19032 goto nomore; 19033 if (doing_tlp == 0) { 19034 if (rsm == NULL) { 19035 /* 19036 * Not a retransmission of some 19037 * sort, new data is going out so 19038 * clear our TLP count and flag. 19039 */ 19040 rack->rc_tlp_in_progress = 0; 19041 rack->r_ctl.rc_tlp_cnt_out = 0; 19042 } 19043 } else { 19044 /* 19045 * We have just sent a TLP, mark that it is true 19046 * and make sure our in progress is set so we 19047 * continue to check the count. 19048 */ 19049 rack->rc_tlp_in_progress = 1; 19050 rack->r_ctl.rc_tlp_cnt_out++; 19051 } 19052 if (flags & (TH_SYN | TH_FIN)) { 19053 if (flags & TH_SYN) 19054 tp->snd_nxt++; 19055 if (flags & TH_FIN) { 19056 tp->snd_nxt++; 19057 tp->t_flags |= TF_SENTFIN; 19058 } 19059 } 19060 /* In the ENOBUFS case we do *not* update snd_max */ 19061 if (sack_rxmit) 19062 goto nomore; 19063 19064 tp->snd_nxt += len; 19065 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 19066 if (tp->snd_una == tp->snd_max) { 19067 /* 19068 * Update the time we just added data since 19069 * none was outstanding. 
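				 * (t_acktime feeds the connection progress check and
				 * the PROGRESS_START log event below records when the
				 * clock started, so a peer that never ACKs this data
				 * can eventually be timed out for lack of progress.)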
19070 */ 19071 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19072 tp->t_acktime = ticks; 19073 } 19074 tp->snd_max = tp->snd_nxt; 19075 /* 19076 * Time this transmission if not a retransmission and 19077 * not currently timing anything. 19078 * This is only relevant in case of switching back to 19079 * the base stack. 19080 */ 19081 if (tp->t_rtttime == 0) { 19082 tp->t_rtttime = ticks; 19083 tp->t_rtseq = startseq; 19084 KMOD_TCPSTAT_INC(tcps_segstimed); 19085 } 19086 if (len && 19087 ((tp->t_flags & TF_GPUTINPROG) == 0)) 19088 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 19089 } 19090 /* 19091 * If we are doing FO we need to update the mbuf position and subtract 19092 * this happens when the peer sends us duplicate information and 19093 * we thus want to send a DSACK. 19094 * 19095 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 19096 * turned off? If not then we are going to echo multiple DSACK blocks 19097 * out (with the TSO), which we should not be doing. 19098 */ 19099 if (rack->r_fast_output && len) { 19100 if (rack->r_ctl.fsb.left_to_send > len) 19101 rack->r_ctl.fsb.left_to_send -= len; 19102 else 19103 rack->r_ctl.fsb.left_to_send = 0; 19104 if (rack->r_ctl.fsb.left_to_send < segsiz) 19105 rack->r_fast_output = 0; 19106 if (rack->r_fast_output) { 19107 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19108 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19109 } 19110 } 19111 } 19112 nomore: 19113 if (error) { 19114 rack->r_ctl.rc_agg_delayed = 0; 19115 rack->r_early = 0; 19116 rack->r_late = 0; 19117 rack->r_ctl.rc_agg_early = 0; 19118 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 19119 /* 19120 * Failures do not advance the seq counter above. For the 19121 * case of ENOBUFS we will fall out and retry in 1ms with 19122 * the hpts. Everything else will just have to retransmit 19123 * with the timer. 19124 * 19125 * In any case, we do not want to loop around for another 19126 * send without a good reason. 19127 */ 19128 sendalot = 0; 19129 switch (error) { 19130 case EPERM: 19131 tp->t_softerror = error; 19132 #ifdef TCP_ACCOUNTING 19133 crtsc = get_cyclecount(); 19134 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19135 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19136 } 19137 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19138 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19139 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19140 } 19141 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19142 sched_unpin(); 19143 #endif 19144 return (error); 19145 case ENOBUFS: 19146 /* 19147 * Pace us right away to retry in a some 19148 * time 19149 */ 19150 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19151 if (rack->rc_enobuf < 0x7f) 19152 rack->rc_enobuf++; 19153 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19154 slot = 10 * HPTS_USEC_IN_MSEC; 19155 if (rack->r_ctl.crte != NULL) { 19156 counter_u64_add(rack_saw_enobuf_hw, 1); 19157 tcp_rl_log_enobuf(rack->r_ctl.crte); 19158 } 19159 counter_u64_add(rack_saw_enobuf, 1); 19160 goto enobufs; 19161 case EMSGSIZE: 19162 /* 19163 * For some reason the interface we used initially 19164 * to send segments changed to another or lowered 19165 * its MTU. If TSO was active we either got an 19166 * interface without TSO capabilits or TSO was 19167 * turned off. If we obtained mtu from ip_output() 19168 * then update it and try again. 
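		 * (If no usable MTU came back from ip_output() we do not loop
		 * here; we just ask the hpts to retry in 10ms and let the
		 * normal retransmit machinery sort it out.)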
19169 */ 19170 if (tso) 19171 tp->t_flags &= ~TF_TSO; 19172 if (mtu != 0) { 19173 tcp_mss_update(tp, -1, mtu, NULL, NULL); 19174 goto again; 19175 } 19176 slot = 10 * HPTS_USEC_IN_MSEC; 19177 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19178 #ifdef TCP_ACCOUNTING 19179 crtsc = get_cyclecount(); 19180 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19181 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19182 } 19183 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19184 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19185 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19186 } 19187 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19188 sched_unpin(); 19189 #endif 19190 return (error); 19191 case ENETUNREACH: 19192 counter_u64_add(rack_saw_enetunreach, 1); 19193 case EHOSTDOWN: 19194 case EHOSTUNREACH: 19195 case ENETDOWN: 19196 if (TCPS_HAVERCVDSYN(tp->t_state)) { 19197 tp->t_softerror = error; 19198 } 19199 /* FALLTHROUGH */ 19200 default: 19201 slot = 10 * HPTS_USEC_IN_MSEC; 19202 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19203 #ifdef TCP_ACCOUNTING 19204 crtsc = get_cyclecount(); 19205 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19206 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19207 } 19208 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19209 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19210 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19211 } 19212 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19213 sched_unpin(); 19214 #endif 19215 return (error); 19216 } 19217 } else { 19218 rack->rc_enobuf = 0; 19219 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19220 rack->r_ctl.retran_during_recovery += len; 19221 } 19222 KMOD_TCPSTAT_INC(tcps_sndtotal); 19223 19224 /* 19225 * Data sent (as far as we can tell). If this advertises a larger 19226 * window than any other segment, then remember the size of the 19227 * advertised window. Any pending ACK has now been sent. 19228 */ 19229 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 19230 tp->rcv_adv = tp->rcv_nxt + recwin; 19231 19232 tp->last_ack_sent = tp->rcv_nxt; 19233 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19234 enobufs: 19235 if (sendalot) { 19236 /* Do we need to turn off sendalot? */ 19237 if (rack->r_ctl.rc_pace_max_segs && 19238 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19239 /* We hit our max. */ 19240 sendalot = 0; 19241 } else if ((rack->rc_user_set_max_segs) && 19242 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19243 /* We hit the user defined max */ 19244 sendalot = 0; 19245 } 19246 } 19247 if ((error == 0) && (flags & TH_FIN)) 19248 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19249 if (flags & TH_RST) { 19250 /* 19251 * We don't send again after sending a RST. 19252 */ 19253 slot = 0; 19254 sendalot = 0; 19255 if (error == 0) 19256 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19257 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19258 /* 19259 * Get our pacing rate, if an error 19260 * occurred in sending (ENOBUF) we would 19261 * hit the else if with slot preset. Other 19262 * errors return. 19263 */ 19264 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19265 } 19266 if (rsm && 19267 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19268 rack->use_rack_rr) { 19269 /* Its a retransmit and we use the rack cheat? 
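	 * With rack-rr a retransmission is clocked out on the minimum rack
	 * timeout instead of at a computed pacing rate, so when no pacing
	 * slot applies we fall back to rc_min_to below.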
*/ 19270 if ((slot == 0) || 19271 (rack->rc_always_pace == 0) || 19272 (rack->r_rr_config == 1)) { 19273 /* 19274 * We have no pacing set or we 19275 * are using old-style rack or 19276 * we are overriden to use the old 1ms pacing. 19277 */ 19278 slot = rack->r_ctl.rc_min_to; 19279 } 19280 } 19281 /* We have sent clear the flag */ 19282 rack->r_ent_rec_ns = 0; 19283 if (rack->r_must_retran) { 19284 if (rsm) { 19285 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19286 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19287 /* 19288 * We have retransmitted all. 19289 */ 19290 rack->r_must_retran = 0; 19291 rack->r_ctl.rc_out_at_rto = 0; 19292 } 19293 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19294 /* 19295 * Sending new data will also kill 19296 * the loop. 19297 */ 19298 rack->r_must_retran = 0; 19299 rack->r_ctl.rc_out_at_rto = 0; 19300 } 19301 } 19302 rack->r_ctl.fsb.recwin = recwin; 19303 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19304 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19305 /* 19306 * We hit an RTO and now have past snd_max at the RTO 19307 * clear all the WAS flags. 19308 */ 19309 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19310 } 19311 if (slot) { 19312 /* set the rack tcb into the slot N */ 19313 counter_u64_add(rack_paced_segments, 1); 19314 if ((error == 0) && 19315 rack_use_rfo && 19316 ((flags & (TH_SYN|TH_FIN)) == 0) && 19317 (rsm == NULL) && 19318 (tp->snd_nxt == tp->snd_max) && 19319 (ipoptlen == 0) && 19320 (tp->rcv_numsacks == 0) && 19321 rack->r_fsb_inited && 19322 TCPS_HAVEESTABLISHED(tp->t_state) && 19323 (rack->r_must_retran == 0) && 19324 ((tp->t_flags & TF_NEEDFIN) == 0) && 19325 (len > 0) && (orig_len > 0) && 19326 (orig_len > len) && 19327 ((orig_len - len) >= segsiz) && 19328 ((optlen == 0) || 19329 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19330 /* We can send at least one more MSS using our fsb */ 19331 19332 rack->r_fast_output = 1; 19333 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19334 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19335 rack->r_ctl.fsb.tcp_flags = flags; 19336 rack->r_ctl.fsb.left_to_send = orig_len - len; 19337 if (hw_tls) 19338 rack->r_ctl.fsb.hw_tls = 1; 19339 else 19340 rack->r_ctl.fsb.hw_tls = 0; 19341 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19342 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19343 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19344 (tp->snd_max - tp->snd_una))); 19345 if (rack->r_ctl.fsb.left_to_send < segsiz) 19346 rack->r_fast_output = 0; 19347 else { 19348 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19349 rack->r_ctl.fsb.rfo_apply_push = 1; 19350 else 19351 rack->r_ctl.fsb.rfo_apply_push = 0; 19352 } 19353 } else 19354 rack->r_fast_output = 0; 19355 rack_log_fsb(rack, tp, so, flags, 19356 ipoptlen, orig_len, len, error, 19357 (rsm == NULL), optlen, __LINE__, 2); 19358 } else if (sendalot) { 19359 int ret; 19360 19361 if (len) 19362 counter_u64_add(rack_unpaced_segments, 1); 19363 sack_rxmit = 0; 19364 if ((error == 0) && 19365 rack_use_rfo && 19366 ((flags & (TH_SYN|TH_FIN)) == 0) && 19367 (rsm == NULL) && 19368 (ipoptlen == 0) && 19369 (tp->rcv_numsacks == 0) && 19370 (tp->snd_nxt == tp->snd_max) && 19371 (rack->r_must_retran == 0) && 19372 rack->r_fsb_inited && 19373 TCPS_HAVEESTABLISHED(tp->t_state) && 19374 ((tp->t_flags & TF_NEEDFIN) == 0) && 19375 (len > 0) && (orig_len > 0) 
&& 19376 (orig_len > len) && 19377 ((orig_len - len) >= segsiz) && 19378 ((optlen == 0) || 19379 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19380 /* we can use fast_output for more */ 19381 19382 rack->r_fast_output = 1; 19383 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19384 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19385 rack->r_ctl.fsb.tcp_flags = flags; 19386 rack->r_ctl.fsb.left_to_send = orig_len - len; 19387 if (hw_tls) 19388 rack->r_ctl.fsb.hw_tls = 1; 19389 else 19390 rack->r_ctl.fsb.hw_tls = 0; 19391 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19392 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19393 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19394 (tp->snd_max - tp->snd_una))); 19395 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19396 rack->r_fast_output = 0; 19397 } 19398 if (rack->r_fast_output) { 19399 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19400 rack->r_ctl.fsb.rfo_apply_push = 1; 19401 else 19402 rack->r_ctl.fsb.rfo_apply_push = 0; 19403 rack_log_fsb(rack, tp, so, flags, 19404 ipoptlen, orig_len, len, error, 19405 (rsm == NULL), optlen, __LINE__, 3); 19406 error = 0; 19407 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19408 if (ret >= 0) 19409 return (ret); 19410 else if (error) 19411 goto nomore; 19412 19413 } 19414 } 19415 goto again; 19416 } else if (len) { 19417 counter_u64_add(rack_unpaced_segments, 1); 19418 } 19419 /* Assure when we leave that snd_nxt will point to top */ 19420 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19421 tp->snd_nxt = tp->snd_max; 19422 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19423 #ifdef TCP_ACCOUNTING 19424 crtsc = get_cyclecount() - ts_val; 19425 if (tot_len_this_send) { 19426 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19427 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19428 } 19429 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19430 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19431 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19432 } 19433 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19434 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19435 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19436 } 19437 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19438 } else { 19439 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19440 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19441 } 19442 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19443 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19444 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19445 } 19446 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19447 } 19448 sched_unpin(); 19449 #endif 19450 if (error == ENOBUFS) 19451 error = 0; 19452 return (error); 19453 } 19454 19455 static void 19456 rack_update_seg(struct tcp_rack *rack) 19457 { 19458 uint32_t orig_val; 19459 19460 orig_val = rack->r_ctl.rc_pace_max_segs; 19461 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19462 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19463 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19464 } 19465 19466 static void 19467 rack_mtu_change(struct tcpcb *tp) 19468 { 19469 /* 19470 * The MSS may have changed 19471 */ 19472 struct tcp_rack *rack; 19473 struct rack_sendmap *rsm; 19474 19475 rack = (struct tcp_rack *)tp->t_fb_ptr; 19476 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 
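		/*
		 * (rc_pace_min_segs was derived from the previous segment size
		 * by rack_set_pace_segments(), so a mismatch against
		 * ctf_fixed_maxseg() is how we notice the MSS really moved;
		 * this reading is inferred from the code, not from an
		 * original comment.)
		 */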
19477 /* 19478 * The MTU has changed we need to resend everything 19479 * since all we have sent is lost. We first fix 19480 * up the mtu though. 19481 */ 19482 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19483 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19484 rack_remxt_tmr(tp); 19485 rack->r_fast_output = 0; 19486 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19487 rack->r_ctl.rc_sacked); 19488 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19489 rack->r_must_retran = 1; 19490 /* Mark all inflight to needing to be rxt'd */ 19491 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19492 rsm->r_flags |= RACK_MUST_RXT; 19493 } 19494 } 19495 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19496 /* We don't use snd_nxt to retransmit */ 19497 tp->snd_nxt = tp->snd_max; 19498 } 19499 19500 static int 19501 rack_set_profile(struct tcp_rack *rack, int prof) 19502 { 19503 int err = EINVAL; 19504 if (prof == 1) { 19505 /* pace_always=1 */ 19506 if (rack->rc_always_pace == 0) { 19507 if (tcp_can_enable_pacing() == 0) 19508 return (EBUSY); 19509 } 19510 rack->rc_always_pace = 1; 19511 if (rack->use_fixed_rate || rack->gp_ready) 19512 rack_set_cc_pacing(rack); 19513 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19514 rack->rack_attempt_hdwr_pace = 0; 19515 /* cmpack=1 */ 19516 if (rack_use_cmp_acks) 19517 rack->r_use_cmp_ack = 1; 19518 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19519 rack->r_use_cmp_ack) 19520 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19521 /* scwnd=1 */ 19522 rack->rack_enable_scwnd = 1; 19523 /* dynamic=100 */ 19524 rack->rc_gp_dyn_mul = 1; 19525 /* gp_inc_ca */ 19526 rack->r_ctl.rack_per_of_gp_ca = 100; 19527 /* rrr_conf=3 */ 19528 rack->r_rr_config = 3; 19529 /* npush=2 */ 19530 rack->r_ctl.rc_no_push_at_mrtt = 2; 19531 /* fillcw=1 */ 19532 rack->rc_pace_to_cwnd = 1; 19533 rack->rc_pace_fill_if_rttin_range = 0; 19534 rack->rtt_limit_mul = 0; 19535 /* noprr=1 */ 19536 rack->rack_no_prr = 1; 19537 /* lscwnd=1 */ 19538 rack->r_limit_scw = 1; 19539 /* gp_inc_rec */ 19540 rack->r_ctl.rack_per_of_gp_rec = 90; 19541 err = 0; 19542 19543 } else if (prof == 3) { 19544 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19545 /* pace_always=1 */ 19546 if (rack->rc_always_pace == 0) { 19547 if (tcp_can_enable_pacing() == 0) 19548 return (EBUSY); 19549 } 19550 rack->rc_always_pace = 1; 19551 if (rack->use_fixed_rate || rack->gp_ready) 19552 rack_set_cc_pacing(rack); 19553 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19554 rack->rack_attempt_hdwr_pace = 0; 19555 /* cmpack=1 */ 19556 if (rack_use_cmp_acks) 19557 rack->r_use_cmp_ack = 1; 19558 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19559 rack->r_use_cmp_ack) 19560 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19561 /* scwnd=1 */ 19562 rack->rack_enable_scwnd = 1; 19563 /* dynamic=100 */ 19564 rack->rc_gp_dyn_mul = 1; 19565 /* gp_inc_ca */ 19566 rack->r_ctl.rack_per_of_gp_ca = 100; 19567 /* rrr_conf=3 */ 19568 rack->r_rr_config = 3; 19569 /* npush=2 */ 19570 rack->r_ctl.rc_no_push_at_mrtt = 2; 19571 /* fillcw=2 */ 19572 rack->rc_pace_to_cwnd = 1; 19573 rack->r_fill_less_agg = 1; 19574 rack->rc_pace_fill_if_rttin_range = 0; 19575 rack->rtt_limit_mul = 0; 19576 /* noprr=1 */ 19577 rack->rack_no_prr = 1; 19578 /* lscwnd=1 */ 19579 rack->r_limit_scw = 1; 19580 /* gp_inc_rec */ 19581 rack->r_ctl.rack_per_of_gp_rec = 90; 19582 err = 0; 19583 19584 19585 } else if (prof == 2) { 19586 /* cmpack=1 */ 19587 if (rack->rc_always_pace == 0) { 19588 if 
(tcp_can_enable_pacing() == 0) 19589 return (EBUSY); 19590 } 19591 rack->rc_always_pace = 1; 19592 if (rack->use_fixed_rate || rack->gp_ready) 19593 rack_set_cc_pacing(rack); 19594 rack->r_use_cmp_ack = 1; 19595 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19596 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19597 /* pace_always=1 */ 19598 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19599 /* scwnd=1 */ 19600 rack->rack_enable_scwnd = 1; 19601 /* dynamic=100 */ 19602 rack->rc_gp_dyn_mul = 1; 19603 rack->r_ctl.rack_per_of_gp_ca = 100; 19604 /* rrr_conf=3 */ 19605 rack->r_rr_config = 3; 19606 /* npush=2 */ 19607 rack->r_ctl.rc_no_push_at_mrtt = 2; 19608 /* fillcw=1 */ 19609 rack->rc_pace_to_cwnd = 1; 19610 rack->rc_pace_fill_if_rttin_range = 0; 19611 rack->rtt_limit_mul = 0; 19612 /* noprr=1 */ 19613 rack->rack_no_prr = 1; 19614 /* lscwnd=0 */ 19615 rack->r_limit_scw = 0; 19616 err = 0; 19617 } else if (prof == 0) { 19618 /* This changes things back to the default settings */ 19619 err = 0; 19620 if (rack->rc_always_pace) { 19621 tcp_decrement_paced_conn(); 19622 rack_undo_cc_pacing(rack); 19623 rack->rc_always_pace = 0; 19624 } 19625 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19626 rack->rc_always_pace = 1; 19627 if (rack->use_fixed_rate || rack->gp_ready) 19628 rack_set_cc_pacing(rack); 19629 } else 19630 rack->rc_always_pace = 0; 19631 if (rack_dsack_std_based & 0x1) { 19632 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19633 rack->rc_rack_tmr_std_based = 1; 19634 } 19635 if (rack_dsack_std_based & 0x2) { 19636 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19637 rack->rc_rack_use_dsack = 1; 19638 } 19639 if (rack_use_cmp_acks) 19640 rack->r_use_cmp_ack = 1; 19641 else 19642 rack->r_use_cmp_ack = 0; 19643 if (rack_disable_prr) 19644 rack->rack_no_prr = 1; 19645 else 19646 rack->rack_no_prr = 0; 19647 if (rack_gp_no_rec_chg) 19648 rack->rc_gp_no_rec_chg = 1; 19649 else 19650 rack->rc_gp_no_rec_chg = 0; 19651 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19652 rack->r_mbuf_queue = 1; 19653 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19654 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19655 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19656 } else { 19657 rack->r_mbuf_queue = 0; 19658 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19659 } 19660 if (rack_enable_shared_cwnd) 19661 rack->rack_enable_scwnd = 1; 19662 else 19663 rack->rack_enable_scwnd = 0; 19664 if (rack_do_dyn_mul) { 19665 /* When dynamic adjustment is on CA needs to start at 100% */ 19666 rack->rc_gp_dyn_mul = 1; 19667 if (rack_do_dyn_mul >= 100) 19668 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19669 } else { 19670 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19671 rack->rc_gp_dyn_mul = 0; 19672 } 19673 rack->r_rr_config = 0; 19674 rack->r_ctl.rc_no_push_at_mrtt = 0; 19675 rack->rc_pace_to_cwnd = 0; 19676 rack->rc_pace_fill_if_rttin_range = 0; 19677 rack->rtt_limit_mul = 0; 19678 19679 if (rack_enable_hw_pacing) 19680 rack->rack_hdw_pace_ena = 1; 19681 else 19682 rack->rack_hdw_pace_ena = 0; 19683 if (rack_disable_prr) 19684 rack->rack_no_prr = 1; 19685 else 19686 rack->rack_no_prr = 0; 19687 if (rack_limits_scwnd) 19688 rack->r_limit_scw = 1; 19689 else 19690 rack->r_limit_scw = 0; 19691 err = 0; 19692 } 19693 return (err); 19694 } 19695 19696 static int 19697 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19698 { 19699 struct deferred_opt_list *dol; 19700 19701 dol = 
malloc(sizeof(struct deferred_opt_list), 19702 M_TCPFSB, M_NOWAIT|M_ZERO); 19703 if (dol == NULL) { 19704 /* 19705 * No space yikes -- fail out.. 19706 */ 19707 return (0); 19708 } 19709 dol->optname = sopt_name; 19710 dol->optval = loptval; 19711 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19712 return (1); 19713 } 19714 19715 static int 19716 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19717 uint32_t optval, uint64_t loptval) 19718 { 19719 struct epoch_tracker et; 19720 struct sockopt sopt; 19721 struct cc_newreno_opts opt; 19722 uint64_t val; 19723 int error = 0; 19724 uint16_t ca, ss; 19725 19726 switch (sopt_name) { 19727 19728 case TCP_RACK_DSACK_OPT: 19729 RACK_OPTS_INC(tcp_rack_dsack_opt); 19730 if (optval & 0x1) { 19731 rack->rc_rack_tmr_std_based = 1; 19732 } else { 19733 rack->rc_rack_tmr_std_based = 0; 19734 } 19735 if (optval & 0x2) { 19736 rack->rc_rack_use_dsack = 1; 19737 } else { 19738 rack->rc_rack_use_dsack = 0; 19739 } 19740 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19741 break; 19742 case TCP_RACK_PACING_BETA: 19743 RACK_OPTS_INC(tcp_rack_beta); 19744 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19745 /* This only works for newreno. */ 19746 error = EINVAL; 19747 break; 19748 } 19749 if (rack->rc_pacing_cc_set) { 19750 /* 19751 * Set them into the real CC module 19752 * whats in the rack pcb is the old values 19753 * to be used on restoral/ 19754 */ 19755 sopt.sopt_dir = SOPT_SET; 19756 opt.name = CC_NEWRENO_BETA; 19757 opt.val = optval; 19758 if (CC_ALGO(tp)->ctl_output != NULL) 19759 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19760 else { 19761 error = ENOENT; 19762 break; 19763 } 19764 } else { 19765 /* 19766 * Not pacing yet so set it into our local 19767 * rack pcb storage. 19768 */ 19769 rack->r_ctl.rc_saved_beta.beta = optval; 19770 } 19771 break; 19772 case TCP_RACK_TIMER_SLOP: 19773 RACK_OPTS_INC(tcp_rack_timer_slop); 19774 rack->r_ctl.timer_slop = optval; 19775 if (rack->rc_tp->t_srtt) { 19776 /* 19777 * If we have an SRTT lets update t_rxtcur 19778 * to have the new slop. 19779 */ 19780 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19781 rack_rto_min, rack_rto_max, 19782 rack->r_ctl.timer_slop); 19783 } 19784 break; 19785 case TCP_RACK_PACING_BETA_ECN: 19786 RACK_OPTS_INC(tcp_rack_beta_ecn); 19787 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19788 /* This only works for newreno. */ 19789 error = EINVAL; 19790 break; 19791 } 19792 if (rack->rc_pacing_cc_set) { 19793 /* 19794 * Set them into the real CC module 19795 * whats in the rack pcb is the old values 19796 * to be used on restoral/ 19797 */ 19798 sopt.sopt_dir = SOPT_SET; 19799 opt.name = CC_NEWRENO_BETA_ECN; 19800 opt.val = optval; 19801 if (CC_ALGO(tp)->ctl_output != NULL) 19802 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19803 else 19804 error = ENOENT; 19805 } else { 19806 /* 19807 * Not pacing yet so set it into our local 19808 * rack pcb storage. 
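			 * (rack_set_cc_pacing() is expected to push the saved
			 * beta values into newreno once pacing is actually
			 * enabled, so deferring the ctl_output call here should
			 * not lose the setting.)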
19809 */ 19810 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19811 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19812 } 19813 break; 19814 case TCP_DEFER_OPTIONS: 19815 RACK_OPTS_INC(tcp_defer_opt); 19816 if (optval) { 19817 if (rack->gp_ready) { 19818 /* Too late */ 19819 error = EINVAL; 19820 break; 19821 } 19822 rack->defer_options = 1; 19823 } else 19824 rack->defer_options = 0; 19825 break; 19826 case TCP_RACK_MEASURE_CNT: 19827 RACK_OPTS_INC(tcp_rack_measure_cnt); 19828 if (optval && (optval <= 0xff)) { 19829 rack->r_ctl.req_measurements = optval; 19830 } else 19831 error = EINVAL; 19832 break; 19833 case TCP_REC_ABC_VAL: 19834 RACK_OPTS_INC(tcp_rec_abc_val); 19835 if (optval > 0) 19836 rack->r_use_labc_for_rec = 1; 19837 else 19838 rack->r_use_labc_for_rec = 0; 19839 break; 19840 case TCP_RACK_ABC_VAL: 19841 RACK_OPTS_INC(tcp_rack_abc_val); 19842 if ((optval > 0) && (optval < 255)) 19843 rack->rc_labc = optval; 19844 else 19845 error = EINVAL; 19846 break; 19847 case TCP_HDWR_UP_ONLY: 19848 RACK_OPTS_INC(tcp_pacing_up_only); 19849 if (optval) 19850 rack->r_up_only = 1; 19851 else 19852 rack->r_up_only = 0; 19853 break; 19854 case TCP_PACING_RATE_CAP: 19855 RACK_OPTS_INC(tcp_pacing_rate_cap); 19856 rack->r_ctl.bw_rate_cap = loptval; 19857 break; 19858 case TCP_RACK_PROFILE: 19859 RACK_OPTS_INC(tcp_profile); 19860 error = rack_set_profile(rack, optval); 19861 break; 19862 case TCP_USE_CMP_ACKS: 19863 RACK_OPTS_INC(tcp_use_cmp_acks); 19864 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19865 /* You can't turn it off once its on! */ 19866 error = EINVAL; 19867 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19868 rack->r_use_cmp_ack = 1; 19869 rack->r_mbuf_queue = 1; 19870 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19871 } 19872 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19873 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19874 break; 19875 case TCP_SHARED_CWND_TIME_LIMIT: 19876 RACK_OPTS_INC(tcp_lscwnd); 19877 if (optval) 19878 rack->r_limit_scw = 1; 19879 else 19880 rack->r_limit_scw = 0; 19881 break; 19882 case TCP_RACK_PACE_TO_FILL: 19883 RACK_OPTS_INC(tcp_fillcw); 19884 if (optval == 0) 19885 rack->rc_pace_to_cwnd = 0; 19886 else { 19887 rack->rc_pace_to_cwnd = 1; 19888 if (optval > 1) 19889 rack->r_fill_less_agg = 1; 19890 } 19891 if ((optval >= rack_gp_rtt_maxmul) && 19892 rack_gp_rtt_maxmul && 19893 (optval < 0xf)) { 19894 rack->rc_pace_fill_if_rttin_range = 1; 19895 rack->rtt_limit_mul = optval; 19896 } else { 19897 rack->rc_pace_fill_if_rttin_range = 0; 19898 rack->rtt_limit_mul = 0; 19899 } 19900 break; 19901 case TCP_RACK_NO_PUSH_AT_MAX: 19902 RACK_OPTS_INC(tcp_npush); 19903 if (optval == 0) 19904 rack->r_ctl.rc_no_push_at_mrtt = 0; 19905 else if (optval < 0xff) 19906 rack->r_ctl.rc_no_push_at_mrtt = optval; 19907 else 19908 error = EINVAL; 19909 break; 19910 case TCP_SHARED_CWND_ENABLE: 19911 RACK_OPTS_INC(tcp_rack_scwnd); 19912 if (optval == 0) 19913 rack->rack_enable_scwnd = 0; 19914 else 19915 rack->rack_enable_scwnd = 1; 19916 break; 19917 case TCP_RACK_MBUF_QUEUE: 19918 /* Now do we use the LRO mbuf-queue feature */ 19919 RACK_OPTS_INC(tcp_rack_mbufq); 19920 if (optval || rack->r_use_cmp_ack) 19921 rack->r_mbuf_queue = 1; 19922 else 19923 rack->r_mbuf_queue = 0; 19924 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19925 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19926 else 19927 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19928 break; 19929 case 
TCP_RACK_NONRXT_CFG_RATE: 19930 RACK_OPTS_INC(tcp_rack_cfg_rate); 19931 if (optval == 0) 19932 rack->rack_rec_nonrxt_use_cr = 0; 19933 else 19934 rack->rack_rec_nonrxt_use_cr = 1; 19935 break; 19936 case TCP_NO_PRR: 19937 RACK_OPTS_INC(tcp_rack_noprr); 19938 if (optval == 0) 19939 rack->rack_no_prr = 0; 19940 else if (optval == 1) 19941 rack->rack_no_prr = 1; 19942 else if (optval == 2) 19943 rack->no_prr_addback = 1; 19944 else 19945 error = EINVAL; 19946 break; 19947 case TCP_TIMELY_DYN_ADJ: 19948 RACK_OPTS_INC(tcp_timely_dyn); 19949 if (optval == 0) 19950 rack->rc_gp_dyn_mul = 0; 19951 else { 19952 rack->rc_gp_dyn_mul = 1; 19953 if (optval >= 100) { 19954 /* 19955 * If the user sets something 100 or more 19956 * its the gp_ca value. 19957 */ 19958 rack->r_ctl.rack_per_of_gp_ca = optval; 19959 } 19960 } 19961 break; 19962 case TCP_RACK_DO_DETECTION: 19963 RACK_OPTS_INC(tcp_rack_do_detection); 19964 if (optval == 0) 19965 rack->do_detection = 0; 19966 else 19967 rack->do_detection = 1; 19968 break; 19969 case TCP_RACK_TLP_USE: 19970 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19971 error = EINVAL; 19972 break; 19973 } 19974 RACK_OPTS_INC(tcp_tlp_use); 19975 rack->rack_tlp_threshold_use = optval; 19976 break; 19977 case TCP_RACK_TLP_REDUCE: 19978 /* RACK TLP cwnd reduction (bool) */ 19979 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19980 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19981 break; 19982 /* Pacing related ones */ 19983 case TCP_RACK_PACE_ALWAYS: 19984 /* 19985 * zero is old rack method, 1 is new 19986 * method using a pacing rate. 19987 */ 19988 RACK_OPTS_INC(tcp_rack_pace_always); 19989 if (optval > 0) { 19990 if (rack->rc_always_pace) { 19991 error = EALREADY; 19992 break; 19993 } else if (tcp_can_enable_pacing()) { 19994 rack->rc_always_pace = 1; 19995 if (rack->use_fixed_rate || rack->gp_ready) 19996 rack_set_cc_pacing(rack); 19997 } 19998 else { 19999 error = ENOSPC; 20000 break; 20001 } 20002 } else { 20003 if (rack->rc_always_pace) { 20004 tcp_decrement_paced_conn(); 20005 rack->rc_always_pace = 0; 20006 rack_undo_cc_pacing(rack); 20007 } 20008 } 20009 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 20010 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 20011 else 20012 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 20013 /* A rate may be set irate or other, if so set seg size */ 20014 rack_update_seg(rack); 20015 break; 20016 case TCP_BBR_RACK_INIT_RATE: 20017 RACK_OPTS_INC(tcp_initial_rate); 20018 val = optval; 20019 /* Change from kbits per second to bytes per second */ 20020 val *= 1000; 20021 val /= 8; 20022 rack->r_ctl.init_rate = val; 20023 if (rack->rc_init_win != rack_default_init_window) { 20024 uint32_t win, snt; 20025 20026 /* 20027 * Options don't always get applied 20028 * in the order you think. So in order 20029 * to assure we update a cwnd we need 20030 * to check and see if we are still 20031 * where we should raise the cwnd. 
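			 * (Concretely: snd_cwnd is only raised to the configured
			 * initial window while less than that window has been
			 * sent and the current cwnd is still below it, so a cwnd
			 * that has already grown past it is never clipped.)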
20032 */ 20033 win = rc_init_window(rack); 20034 if (SEQ_GT(tp->snd_max, tp->iss)) 20035 snt = tp->snd_max - tp->iss; 20036 else 20037 snt = 0; 20038 if ((snt < win) && 20039 (tp->snd_cwnd < win)) 20040 tp->snd_cwnd = win; 20041 } 20042 if (rack->rc_always_pace) 20043 rack_update_seg(rack); 20044 break; 20045 case TCP_BBR_IWINTSO: 20046 RACK_OPTS_INC(tcp_initial_win); 20047 if (optval && (optval <= 0xff)) { 20048 uint32_t win, snt; 20049 20050 rack->rc_init_win = optval; 20051 win = rc_init_window(rack); 20052 if (SEQ_GT(tp->snd_max, tp->iss)) 20053 snt = tp->snd_max - tp->iss; 20054 else 20055 snt = 0; 20056 if ((snt < win) && 20057 (tp->t_srtt | 20058 #ifdef NETFLIX_PEAKRATE 20059 tp->t_maxpeakrate | 20060 #endif 20061 rack->r_ctl.init_rate)) { 20062 /* 20063 * We are not past the initial window 20064 * and we have some bases for pacing, 20065 * so we need to possibly adjust up 20066 * the cwnd. Note even if we don't set 20067 * the cwnd, its still ok to raise the rc_init_win 20068 * which can be used coming out of idle when we 20069 * would have a rate. 20070 */ 20071 if (tp->snd_cwnd < win) 20072 tp->snd_cwnd = win; 20073 } 20074 if (rack->rc_always_pace) 20075 rack_update_seg(rack); 20076 } else 20077 error = EINVAL; 20078 break; 20079 case TCP_RACK_FORCE_MSEG: 20080 RACK_OPTS_INC(tcp_rack_force_max_seg); 20081 if (optval) 20082 rack->rc_force_max_seg = 1; 20083 else 20084 rack->rc_force_max_seg = 0; 20085 break; 20086 case TCP_RACK_PACE_MAX_SEG: 20087 /* Max segments size in a pace in bytes */ 20088 RACK_OPTS_INC(tcp_rack_max_seg); 20089 rack->rc_user_set_max_segs = optval; 20090 rack_set_pace_segments(tp, rack, __LINE__, NULL); 20091 break; 20092 case TCP_RACK_PACE_RATE_REC: 20093 /* Set the fixed pacing rate in Bytes per second ca */ 20094 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 20095 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20096 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 20097 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20098 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 20099 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20100 rack->use_fixed_rate = 1; 20101 if (rack->rc_always_pace) 20102 rack_set_cc_pacing(rack); 20103 rack_log_pacing_delay_calc(rack, 20104 rack->r_ctl.rc_fixed_pacing_rate_ss, 20105 rack->r_ctl.rc_fixed_pacing_rate_ca, 20106 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20107 __LINE__, NULL,0); 20108 break; 20109 20110 case TCP_RACK_PACE_RATE_SS: 20111 /* Set the fixed pacing rate in Bytes per second ca */ 20112 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 20113 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20114 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 20115 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20116 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 20117 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20118 rack->use_fixed_rate = 1; 20119 if (rack->rc_always_pace) 20120 rack_set_cc_pacing(rack); 20121 rack_log_pacing_delay_calc(rack, 20122 rack->r_ctl.rc_fixed_pacing_rate_ss, 20123 rack->r_ctl.rc_fixed_pacing_rate_ca, 20124 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20125 __LINE__, NULL, 0); 20126 break; 20127 20128 case TCP_RACK_PACE_RATE_CA: 20129 /* Set the fixed pacing rate in Bytes per second ca */ 20130 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 20131 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20132 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 20133 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20134 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 20135 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20136 rack->use_fixed_rate = 1; 20137 
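		/*
		 * Setting the CA rate above also seeds the SS and recovery
		 * rates when those have not been configured yet, so a single
		 * pr_ca write yields one uniform fixed pacing rate.
		 */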
if (rack->rc_always_pace) 20138 rack_set_cc_pacing(rack); 20139 rack_log_pacing_delay_calc(rack, 20140 rack->r_ctl.rc_fixed_pacing_rate_ss, 20141 rack->r_ctl.rc_fixed_pacing_rate_ca, 20142 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20143 __LINE__, NULL, 0); 20144 break; 20145 case TCP_RACK_GP_INCREASE_REC: 20146 RACK_OPTS_INC(tcp_gp_inc_rec); 20147 rack->r_ctl.rack_per_of_gp_rec = optval; 20148 rack_log_pacing_delay_calc(rack, 20149 rack->r_ctl.rack_per_of_gp_ss, 20150 rack->r_ctl.rack_per_of_gp_ca, 20151 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20152 __LINE__, NULL, 0); 20153 break; 20154 case TCP_RACK_GP_INCREASE_CA: 20155 RACK_OPTS_INC(tcp_gp_inc_ca); 20156 ca = optval; 20157 if (ca < 100) { 20158 /* 20159 * We don't allow any reduction 20160 * over the GP b/w. 20161 */ 20162 error = EINVAL; 20163 break; 20164 } 20165 rack->r_ctl.rack_per_of_gp_ca = ca; 20166 rack_log_pacing_delay_calc(rack, 20167 rack->r_ctl.rack_per_of_gp_ss, 20168 rack->r_ctl.rack_per_of_gp_ca, 20169 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20170 __LINE__, NULL, 0); 20171 break; 20172 case TCP_RACK_GP_INCREASE_SS: 20173 RACK_OPTS_INC(tcp_gp_inc_ss); 20174 ss = optval; 20175 if (ss < 100) { 20176 /* 20177 * We don't allow any reduction 20178 * over the GP b/w. 20179 */ 20180 error = EINVAL; 20181 break; 20182 } 20183 rack->r_ctl.rack_per_of_gp_ss = ss; 20184 rack_log_pacing_delay_calc(rack, 20185 rack->r_ctl.rack_per_of_gp_ss, 20186 rack->r_ctl.rack_per_of_gp_ca, 20187 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20188 __LINE__, NULL, 0); 20189 break; 20190 case TCP_RACK_RR_CONF: 20191 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 20192 if (optval && optval <= 3) 20193 rack->r_rr_config = optval; 20194 else 20195 rack->r_rr_config = 0; 20196 break; 20197 case TCP_HDWR_RATE_CAP: 20198 RACK_OPTS_INC(tcp_hdwr_rate_cap); 20199 if (optval) { 20200 if (rack->r_rack_hw_rate_caps == 0) 20201 rack->r_rack_hw_rate_caps = 1; 20202 else 20203 error = EALREADY; 20204 } else { 20205 rack->r_rack_hw_rate_caps = 0; 20206 } 20207 break; 20208 case TCP_BBR_HDWR_PACE: 20209 RACK_OPTS_INC(tcp_hdwr_pacing); 20210 if (optval){ 20211 if (rack->rack_hdrw_pacing == 0) { 20212 rack->rack_hdw_pace_ena = 1; 20213 rack->rack_attempt_hdwr_pace = 0; 20214 } else 20215 error = EALREADY; 20216 } else { 20217 rack->rack_hdw_pace_ena = 0; 20218 #ifdef RATELIMIT 20219 if (rack->r_ctl.crte != NULL) { 20220 rack->rack_hdrw_pacing = 0; 20221 rack->rack_attempt_hdwr_pace = 0; 20222 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 20223 rack->r_ctl.crte = NULL; 20224 } 20225 #endif 20226 } 20227 break; 20228 /* End Pacing related ones */ 20229 case TCP_RACK_PRR_SENDALOT: 20230 /* Allow PRR to send more than one seg */ 20231 RACK_OPTS_INC(tcp_rack_prr_sendalot); 20232 rack->r_ctl.rc_prr_sendalot = optval; 20233 break; 20234 case TCP_RACK_MIN_TO: 20235 /* Minimum time between rack t-o's in ms */ 20236 RACK_OPTS_INC(tcp_rack_min_to); 20237 rack->r_ctl.rc_min_to = optval; 20238 break; 20239 case TCP_RACK_EARLY_SEG: 20240 /* If early recovery max segments */ 20241 RACK_OPTS_INC(tcp_rack_early_seg); 20242 rack->r_ctl.rc_early_recovery_segs = optval; 20243 break; 20244 case TCP_RACK_ENABLE_HYSTART: 20245 { 20246 if (optval) { 20247 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 20248 if (rack_do_hystart > RACK_HYSTART_ON) 20249 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 20250 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 20251 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 20252 } else { 20253 tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 20254 } 
20255 } 20256 break; 20257 case TCP_RACK_REORD_THRESH: 20258 /* RACK reorder threshold (shift amount) */ 20259 RACK_OPTS_INC(tcp_rack_reord_thresh); 20260 if ((optval > 0) && (optval < 31)) 20261 rack->r_ctl.rc_reorder_shift = optval; 20262 else 20263 error = EINVAL; 20264 break; 20265 case TCP_RACK_REORD_FADE: 20266 /* Does reordering fade after ms time */ 20267 RACK_OPTS_INC(tcp_rack_reord_fade); 20268 rack->r_ctl.rc_reorder_fade = optval; 20269 break; 20270 case TCP_RACK_TLP_THRESH: 20271 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 20272 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20273 if (optval) 20274 rack->r_ctl.rc_tlp_threshold = optval; 20275 else 20276 error = EINVAL; 20277 break; 20278 case TCP_BBR_USE_RACK_RR: 20279 RACK_OPTS_INC(tcp_rack_rr); 20280 if (optval) 20281 rack->use_rack_rr = 1; 20282 else 20283 rack->use_rack_rr = 0; 20284 break; 20285 case TCP_FAST_RSM_HACK: 20286 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20287 if (optval) 20288 rack->fast_rsm_hack = 1; 20289 else 20290 rack->fast_rsm_hack = 0; 20291 break; 20292 case TCP_RACK_PKT_DELAY: 20293 /* RACK added ms i.e. rack-rtt + reord + N */ 20294 RACK_OPTS_INC(tcp_rack_pkt_delay); 20295 rack->r_ctl.rc_pkt_delay = optval; 20296 break; 20297 case TCP_DELACK: 20298 RACK_OPTS_INC(tcp_rack_delayed_ack); 20299 if (optval == 0) 20300 tp->t_delayed_ack = 0; 20301 else 20302 tp->t_delayed_ack = 1; 20303 if (tp->t_flags & TF_DELACK) { 20304 tp->t_flags &= ~TF_DELACK; 20305 tp->t_flags |= TF_ACKNOW; 20306 NET_EPOCH_ENTER(et); 20307 rack_output(tp); 20308 NET_EPOCH_EXIT(et); 20309 } 20310 break; 20311 20312 case TCP_BBR_RACK_RTT_USE: 20313 RACK_OPTS_INC(tcp_rack_rtt_use); 20314 if ((optval != USE_RTT_HIGH) && 20315 (optval != USE_RTT_LOW) && 20316 (optval != USE_RTT_AVG)) 20317 error = EINVAL; 20318 else 20319 rack->r_ctl.rc_rate_sample_method = optval; 20320 break; 20321 case TCP_DATA_AFTER_CLOSE: 20322 RACK_OPTS_INC(tcp_data_after_close); 20323 if (optval) 20324 rack->rc_allow_data_af_clo = 1; 20325 else 20326 rack->rc_allow_data_af_clo = 0; 20327 break; 20328 default: 20329 break; 20330 } 20331 #ifdef NETFLIX_STATS 20332 tcp_log_socket_option(tp, sopt_name, optval, error); 20333 #endif 20334 return (error); 20335 } 20336 20337 20338 static void 20339 rack_apply_deferred_options(struct tcp_rack *rack) 20340 { 20341 struct deferred_opt_list *dol, *sdol; 20342 uint32_t s_optval; 20343 20344 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20345 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20346 /* Disadvantage of deferal is you loose the error return */ 20347 s_optval = (uint32_t)dol->optval; 20348 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20349 free(dol, M_TCPDO); 20350 } 20351 } 20352 20353 static void 20354 rack_hw_tls_change(struct tcpcb *tp, int chg) 20355 { 20356 /* 20357 * HW tls state has changed.. fix all 20358 * rsm's in flight. 
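	 * (Both the per-rsm r_hw_tls flag and the cached fast send block are
	 * updated below, so retransmissions of data queued before the change
	 * go out with the correct hardware TLS disposition.)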
20359 */ 20360 struct tcp_rack *rack; 20361 struct rack_sendmap *rsm; 20362 20363 rack = (struct tcp_rack *)tp->t_fb_ptr; 20364 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 20365 if (chg) 20366 rsm->r_hw_tls = 1; 20367 else 20368 rsm->r_hw_tls = 0; 20369 } 20370 if (chg) 20371 rack->r_ctl.fsb.hw_tls = 1; 20372 else 20373 rack->r_ctl.fsb.hw_tls = 0; 20374 } 20375 20376 static int 20377 rack_pru_options(struct tcpcb *tp, int flags) 20378 { 20379 if (flags & PRUS_OOB) 20380 return (EOPNOTSUPP); 20381 return (0); 20382 } 20383 20384 static struct tcp_function_block __tcp_rack = { 20385 .tfb_tcp_block_name = __XSTRING(STACKNAME), 20386 .tfb_tcp_output = rack_output, 20387 .tfb_do_queued_segments = ctf_do_queued_segments, 20388 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 20389 .tfb_tcp_do_segment = rack_do_segment, 20390 .tfb_tcp_ctloutput = rack_ctloutput, 20391 .tfb_tcp_fb_init = rack_init, 20392 .tfb_tcp_fb_fini = rack_fini, 20393 .tfb_tcp_timer_stop_all = rack_stopall, 20394 .tfb_tcp_timer_activate = rack_timer_activate, 20395 .tfb_tcp_timer_active = rack_timer_active, 20396 .tfb_tcp_timer_stop = rack_timer_stop, 20397 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 20398 .tfb_tcp_handoff_ok = rack_handoff_ok, 20399 .tfb_tcp_mtu_chg = rack_mtu_change, 20400 .tfb_pru_options = rack_pru_options, 20401 .tfb_hwtls_change = rack_hw_tls_change, 20402 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP, 20403 }; 20404 20405 /* 20406 * rack_ctloutput() must drop the inpcb lock before performing copyin on 20407 * socket option arguments. When it re-acquires the lock after the copy, it 20408 * has to revalidate that the connection is still valid for the socket 20409 * option. 20410 */ 20411 static int 20412 rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt) 20413 { 20414 #ifdef INET6 20415 struct ip6_hdr *ip6; 20416 #endif 20417 #ifdef INET 20418 struct ip *ip; 20419 #endif 20420 struct tcpcb *tp; 20421 struct tcp_rack *rack; 20422 uint64_t loptval; 20423 int32_t error = 0, optval; 20424 20425 tp = intotcpcb(inp); 20426 rack = (struct tcp_rack *)tp->t_fb_ptr; 20427 if (rack == NULL) { 20428 INP_WUNLOCK(inp); 20429 return (EINVAL); 20430 } 20431 #ifdef INET6 20432 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20433 #endif 20434 #ifdef INET 20435 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20436 #endif 20437 20438 switch (sopt->sopt_level) { 20439 #ifdef INET6 20440 case IPPROTO_IPV6: 20441 MPASS(inp->inp_vflag & INP_IPV6PROTO); 20442 switch (sopt->sopt_name) { 20443 case IPV6_USE_MIN_MTU: 20444 tcp6_use_min_mtu(tp); 20445 break; 20446 case IPV6_TCLASS: 20447 /* 20448 * The DSCP codepoint has changed, update the fsb. 20449 */ 20450 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 20451 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK); 20452 break; 20453 } 20454 INP_WUNLOCK(inp); 20455 return (0); 20456 #endif 20457 #ifdef INET 20458 case IPPROTO_IP: 20459 switch (sopt->sopt_name) { 20460 case IP_TOS: 20461 /* 20462 * The DSCP codepoint has changed, update the fsb. 20463 */ 20464 ip->ip_tos = rack->rc_inp->inp_ip_tos; 20465 break; 20466 case IP_TTL: 20467 /* 20468 * The TTL has changed, update the fsb. 
20469 */ 20470 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20471 break; 20472 } 20473 INP_WUNLOCK(inp); 20474 return (0); 20475 #endif 20476 } 20477 20478 switch (sopt->sopt_name) { 20479 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20480 /* Pacing related ones */ 20481 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20482 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20483 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20484 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20485 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20486 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20487 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20488 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20489 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20490 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20491 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20492 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20493 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20494 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20495 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20496 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20497 /* End pacing related */ 20498 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20499 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 20500 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20501 case TCP_RACK_MIN_TO: /* URL:min_to */ 20502 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20503 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20504 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20505 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20506 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20507 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20508 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20509 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20510 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20511 case TCP_NO_PRR: /* URL:noprr */ 20512 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20513 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20514 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20515 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20516 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20517 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20518 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20519 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20520 case TCP_RACK_PROFILE: /* URL:profile */ 20521 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20522 case TCP_RACK_ABC_VAL: /* URL:labc */ 20523 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20524 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20525 case TCP_DEFER_OPTIONS: /* URL:defer */ 20526 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20527 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20528 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20529 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20530 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20531 break; 20532 default: 20533 /* Filter off all unknown options to the base stack */ 20534 return (tcp_default_ctloutput(inp, sopt)); 20535 break; 20536 } 20537 INP_WUNLOCK(inp); 20538 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20539 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20540 /* 20541 * We truncate it down to 32 bits for the socket-option trace this 20542 * means rates > 34Gbps won't show right, but thats probably ok. 
20543 */ 20544 optval = (uint32_t)loptval; 20545 } else { 20546 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20547 /* Save it in 64 bit form too */ 20548 loptval = optval; 20549 } 20550 if (error) 20551 return (error); 20552 INP_WLOCK(inp); 20553 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 20554 INP_WUNLOCK(inp); 20555 return (ECONNRESET); 20556 } 20557 if (tp->t_fb != &__tcp_rack) { 20558 INP_WUNLOCK(inp); 20559 return (ENOPROTOOPT); 20560 } 20561 if (rack->defer_options && (rack->gp_ready == 0) && 20562 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20563 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20564 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20565 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20566 /* Options are beind deferred */ 20567 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20568 INP_WUNLOCK(inp); 20569 return (0); 20570 } else { 20571 /* No memory to defer, fail */ 20572 INP_WUNLOCK(inp); 20573 return (ENOMEM); 20574 } 20575 } 20576 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20577 INP_WUNLOCK(inp); 20578 return (error); 20579 } 20580 20581 static void 20582 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20583 { 20584 20585 INP_WLOCK_ASSERT(tp->t_inpcb); 20586 bzero(ti, sizeof(*ti)); 20587 20588 ti->tcpi_state = tp->t_state; 20589 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20590 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20591 if (tp->t_flags & TF_SACK_PERMIT) 20592 ti->tcpi_options |= TCPI_OPT_SACK; 20593 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20594 ti->tcpi_options |= TCPI_OPT_WSCALE; 20595 ti->tcpi_snd_wscale = tp->snd_scale; 20596 ti->tcpi_rcv_wscale = tp->rcv_scale; 20597 } 20598 if (tp->t_flags2 & TF2_ECN_PERMIT) 20599 ti->tcpi_options |= TCPI_OPT_ECN; 20600 if (tp->t_flags & TF_FASTOPEN) 20601 ti->tcpi_options |= TCPI_OPT_TFO; 20602 /* still kept in ticks is t_rcvtime */ 20603 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20604 /* Since we hold everything in precise useconds this is easy */ 20605 ti->tcpi_rtt = tp->t_srtt; 20606 ti->tcpi_rttvar = tp->t_rttvar; 20607 ti->tcpi_rto = tp->t_rxtcur; 20608 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20609 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20610 /* 20611 * FreeBSD-specific extension fields for tcp_info. 20612 */ 20613 ti->tcpi_rcv_space = tp->rcv_wnd; 20614 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20615 ti->tcpi_snd_wnd = tp->snd_wnd; 20616 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
#ifdef NETFLIX_STATS
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
	struct tcpcb *tp;
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy.  If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences how
	 * much of a backoff happens when an ECN mark is detected. It is normally set
	 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when
	 * you exit recovery. Note that classic ECN has a beta of 50, it is only
	 * ABE ECN that uses this "less" value, but we do too with pacing :)
	 */
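	/*
	 * As a concrete example of the two settings described above: with the
	 * NewReno-style reduction, beta = 50 leaves cwnd at (cwnd * 50) / 100
	 * after a loss, while beta_ecn = 80 leaves it at (cwnd * 80) / 100
	 * after an ECN mark.
	 */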
20706 */ 20707 if (tp->ccv->cc_data) 20708 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn; 20709 else 20710 error = EINVAL; 20711 } 20712 break; 20713 case TCP_RACK_DSACK_OPT: 20714 optval = 0; 20715 if (rack->rc_rack_tmr_std_based) { 20716 optval |= 1; 20717 } 20718 if (rack->rc_rack_use_dsack) { 20719 optval |= 2; 20720 } 20721 break; 20722 case TCP_RACK_ENABLE_HYSTART: 20723 { 20724 if (tp->ccv->flags & CCF_HYSTART_ALLOWED) { 20725 optval = RACK_HYSTART_ON; 20726 if (tp->ccv->flags & CCF_HYSTART_CAN_SH_CWND) 20727 optval = RACK_HYSTART_ON_W_SC; 20728 if (tp->ccv->flags & CCF_HYSTART_CONS_SSTH) 20729 optval = RACK_HYSTART_ON_W_SC_C; 20730 } else { 20731 optval = RACK_HYSTART_OFF; 20732 } 20733 } 20734 break; 20735 case TCP_FAST_RSM_HACK: 20736 optval = rack->fast_rsm_hack; 20737 break; 20738 case TCP_DEFER_OPTIONS: 20739 optval = rack->defer_options; 20740 break; 20741 case TCP_RACK_MEASURE_CNT: 20742 optval = rack->r_ctl.req_measurements; 20743 break; 20744 case TCP_REC_ABC_VAL: 20745 optval = rack->r_use_labc_for_rec; 20746 break; 20747 case TCP_RACK_ABC_VAL: 20748 optval = rack->rc_labc; 20749 break; 20750 case TCP_HDWR_UP_ONLY: 20751 optval= rack->r_up_only; 20752 break; 20753 case TCP_PACING_RATE_CAP: 20754 loptval = rack->r_ctl.bw_rate_cap; 20755 break; 20756 case TCP_RACK_PROFILE: 20757 /* You cannot retrieve a profile, its write only */ 20758 error = EINVAL; 20759 break; 20760 case TCP_USE_CMP_ACKS: 20761 optval = rack->r_use_cmp_ack; 20762 break; 20763 case TCP_RACK_PACE_TO_FILL: 20764 optval = rack->rc_pace_to_cwnd; 20765 if (optval && rack->r_fill_less_agg) 20766 optval++; 20767 break; 20768 case TCP_RACK_NO_PUSH_AT_MAX: 20769 optval = rack->r_ctl.rc_no_push_at_mrtt; 20770 break; 20771 case TCP_SHARED_CWND_ENABLE: 20772 optval = rack->rack_enable_scwnd; 20773 break; 20774 case TCP_RACK_NONRXT_CFG_RATE: 20775 optval = rack->rack_rec_nonrxt_use_cr; 20776 break; 20777 case TCP_NO_PRR: 20778 if (rack->rack_no_prr == 1) 20779 optval = 1; 20780 else if (rack->no_prr_addback == 1) 20781 optval = 2; 20782 else 20783 optval = 0; 20784 break; 20785 case TCP_RACK_DO_DETECTION: 20786 optval = rack->do_detection; 20787 break; 20788 case TCP_RACK_MBUF_QUEUE: 20789 /* Now do we use the LRO mbuf-queue feature */ 20790 optval = rack->r_mbuf_queue; 20791 break; 20792 case TCP_TIMELY_DYN_ADJ: 20793 optval = rack->rc_gp_dyn_mul; 20794 break; 20795 case TCP_BBR_IWINTSO: 20796 optval = rack->rc_init_win; 20797 break; 20798 case TCP_RACK_TLP_REDUCE: 20799 /* RACK TLP cwnd reduction (bool) */ 20800 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 20801 break; 20802 case TCP_BBR_RACK_INIT_RATE: 20803 val = rack->r_ctl.init_rate; 20804 /* convert to kbits per sec */ 20805 val *= 8; 20806 val /= 1000; 20807 optval = (uint32_t)val; 20808 break; 20809 case TCP_RACK_FORCE_MSEG: 20810 optval = rack->rc_force_max_seg; 20811 break; 20812 case TCP_RACK_PACE_MAX_SEG: 20813 /* Max segments in a pace */ 20814 optval = rack->rc_user_set_max_segs; 20815 break; 20816 case TCP_RACK_PACE_ALWAYS: 20817 /* Use the always pace method */ 20818 optval = rack->rc_always_pace; 20819 break; 20820 case TCP_RACK_PRR_SENDALOT: 20821 /* Allow PRR to send more than one seg */ 20822 optval = rack->r_ctl.rc_prr_sendalot; 20823 break; 20824 case TCP_RACK_MIN_TO: 20825 /* Minimum time between rack t-o's in ms */ 20826 optval = rack->r_ctl.rc_min_to; 20827 break; 20828 case TCP_RACK_EARLY_SEG: 20829 /* If early recovery max segments */ 20830 optval = rack->r_ctl.rc_early_recovery_segs; 20831 break; 20832 case TCP_RACK_REORD_THRESH: 
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

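		/*
		 * The two UMA zones created above back the stack: rack_zone
		 * holds the per-segment rack_sendmap entries that make up the
		 * scoreboard, and rack_pcb_zone holds the per-connection
		 * struct tcp_rack control block.
		 */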
		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);